diff --git a/lib/kokkos/.gitignore b/lib/kokkos/.gitignore new file mode 100644 index 0000000000..f9d16be155 --- /dev/null +++ b/lib/kokkos/.gitignore @@ -0,0 +1,8 @@ +# Standard ignores +*~ +*.pyc +\#*# +.#* +.*.swp +.cproject +.project diff --git a/lib/kokkos/CMakeLists.txt b/lib/kokkos/CMakeLists.txt new file mode 100644 index 0000000000..0d437c0f8f --- /dev/null +++ b/lib/kokkos/CMakeLists.txt @@ -0,0 +1,123 @@ + +# +# A) Forward delcare the package so that certain options are also defined for +# subpackages +# + +TRIBITS_PACKAGE_DECL(Kokkos) # ENABLE_SHADOWING_WARNINGS) + +#------------------------------------------------------------------------------ +# +# B) Define the common options for Kokkos first so they can be used by +# subpackages as well. +# + +TRIBITS_ADD_DEBUG_OPTION() + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_SIERRA_BUILD + KOKKOS_FOR_SIERRA + "Configure Kokkos for building within the Sierra build system." + OFF + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_Cuda + KOKKOS_HAVE_CUDA + "Enable CUDA support in Kokkos." + "${TPL_ENABLE_CUDA}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_Cuda_UVM + KOKKOS_USE_CUDA_UVM + "Enable CUDA Unified Virtual Memory support in Kokkos." + OFF + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_Pthread + KOKKOS_HAVE_PTHREAD + "Enable Pthread support in Kokkos." + "${TPL_ENABLE_Pthread}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_OpenMP + KOKKOS_HAVE_OPENMP + "Enable OpenMP support in Kokkos." + "${${PROJECT_NAME}_ENABLE_OpenMP}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_QTHREAD + KOKKOS_HAVE_QTHREAD + "Enable QTHREAD support in Kokkos." + "${TPL_ENABLE_QTHREAD}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_CXX11 + KOKKOS_HAVE_CXX11 + "Enable C++11 support in Kokkos." + "${${PROJECT_NAME}_ENABLE_CXX11}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_HWLOC + KOKKOS_HAVE_HWLOC + "Enable HWLOC support in Kokkos." 
+ "${TPL_ENABLE_HWLOC}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_MPI + KOKKOS_HAVE_MPI + "Enable MPI support in Kokkos." + "${TPL_ENABLE_MPI}" + ) + +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_Debug_Bounds_Check + KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK + "Enable bounds checking support in Kokkos." + OFF + ) + +#TRIBITS_ADD_OPTION_AND_DEFINE( +# Kokkos_ENABLE_Profiling_Collect_Kernel_Data +# KOKKOS_ENABLE_PROFILING_COLLECT_KERNEL_DATA +# "Enable profiling support for kernel data collections in Kokkos." +# "${${PROJECT_NAME}_ENABLE_KokkosProfiler}" +# ) + +# placeholder for future device... +TRIBITS_ADD_OPTION_AND_DEFINE( + Kokkos_ENABLE_Winthread + KOKKOS_HAVE_WINTHREAD + "Enable Winthread support in Kokkos." + "${TPL_ENABLE_Winthread}" + ) + +#------------------------------------------------------------------------------ +# +# C) Process the subpackages for Kokkos +# + +TRIBITS_PROCESS_SUBPACKAGES() + +# +# D) If Kokkos itself is enabled, process the Kokkos package +# + +TRIBITS_PACKAGE_DEF() + +TRIBITS_EXCLUDE_AUTOTOOLS_FILES() + +TRIBITS_EXCLUDE_FILES( + classic/doc + classic/LinAlg/doc/CrsRefactorNotesMay2012 + ) + +TRIBITS_PACKAGE_POSTPROCESS() + diff --git a/lib/kokkos/HOW_TO_SNAPSHOT b/lib/kokkos/HOW_TO_SNAPSHOT new file mode 100644 index 0000000000..46bfb4167f --- /dev/null +++ b/lib/kokkos/HOW_TO_SNAPSHOT @@ -0,0 +1,73 @@ + +Developers of Kokkos (those who commit modifications to Kokkos) +must maintain the snapshot of Kokkos in the Trilinos repository. + +This file contains instructions for how to +snapshot Kokkos from github.com/kokkos to Trilinos. + +------------------------------------------------------------------------ +*** EVERYTHING GOES RIGHT WORKFLOW *** + +1) Given a 'git clone' of Kokkos and of Trilinos repositories. +1.1) Let ${KOKKOS} be the absolute path to the Kokkos clone. + This path *must* terminate with the directory name 'kokkos'; + e.g., ${HOME}/kokkos . +1.2) Let ${TRILINOS} be the absolute path to the Trilinos directory. 
+ +2) Given that the Kokkos build & test is clean and + changes are committed to the Kokkos clone. + +3) Snapshot the current commit in the Kokkos clone into the Trilinos clone. + This overwrites ${TRILINOS}/packages/kokkos with the content of ${KOKKOS}: + ${KOKKOS}/config/snapshot.py --verbose ${KOKKOS} ${TRILINOS}/packages + +4) Verify the snapshot commit happened as expected + cd ${TRILINOS}/packages/kokkos + git log -1 --name-only + +5) Modify, build, and test Trilinos with the Kokkos snapshot. + +6) Given that that the Trilinos build & test is clean and + changes are committed to the Trilinos clone. + +7) Attempt push to the Kokkos repository. + If push fails then you must 'remove the Kokkos snapshot' + from your Trilinos clone. + See below. + +8) Attempt to push to the Trilinos repository. + If updating for a failed push requires you to change Kokkos you must + 'remove the Kokkos snapshot' from your Trilinos clone. + See below. + +------------------------------------------------------------------------ +*** WHEN SOMETHING GOES WRONG AND YOU MUST *** +*** REMOVE THE KOKKOS SNAPSHOT FROM YOUR TRILINOS CLONE *** + +1) Query the Trilinos clone commit log. + git log --oneline + +2) Note the of the commit to the Trillinos clone + immediately BEFORE the Kokkos snapshot commit. + Copy this for use in the next command. + +3) IF more than one outstanding commit then you can remove just the + Kokkos snapshot commit with 'git rebase -i'. Edit the rebase file. + Remove or comment out the Kokkos snapshot commit entry. + git rebase -i + +4) IF the Kokkos snapshot commit is the one and only + outstanding commit then remove just than commit. + git reset --hard HEAD~1 + +------------------------------------------------------------------------ +*** REGARDING 'snapshot.py' TOOL *** + +The 'snapshot.py' tool is developed and maintained by the +Center for Computing Research (CCR) +Software Engineering, Maintenance, and Support (SEMS) team. 
+ +Contact Brent Perschbacher for questions> + +------------------------------------------------------------------------ + diff --git a/lib/kokkos/Makefile.kokkos b/lib/kokkos/Makefile.kokkos index 30ecec3364..cf6d3f3971 100644 --- a/lib/kokkos/Makefile.kokkos +++ b/lib/kokkos/Makefile.kokkos @@ -1,20 +1,18 @@ # Default settings common options -KOKKOS_PATH=../../lib/kokkos - #Options: OpenMP,Serial,Pthreads,Cuda -KOKKOS_DEVICES ?= "OpenMP" -#KOKKOS_DEVICES ?= "Pthreads" -#Options: KNC,SNB,HSW,Kepler,Kepler30,Kepler32,Kepler35,Kepler37,Maxwell,Maxwell50,Maxwell52,Maxwell53,ARMv8,BGQ,Power7,Power8 +#KOKKOS_DEVICES ?= "OpenMP" +KOKKOS_DEVICES ?= "Pthreads" +#Options: KNC,SNB,HSW,Kepler,Kepler30,Kepler32,Kepler35,Kepler37,Maxwell,Maxwell50,Maxwell52,Maxwell53,ARMv8,BGQ,Power7,Power8,KNL KOKKOS_ARCH ?= "" #Options: yes,no KOKKOS_DEBUG ?= "no" -#Options: hwloc,librt +#Options: hwloc,librt,experimental_memkind KOKKOS_USE_TPLS ?= "" #Options: c++11 KOKKOS_CXX_STANDARD ?= "c++11" -#Options: kernel_times,aggregate_mpi -KOKKOS_PROFILING ?= "" +#Options: aggressive_vectorization +KOKKOS_OPTIONS ?= "" #Default settings specific options #Options: force_uvm,use_ldg,rdc,enable_lambda @@ -30,8 +28,10 @@ KOKKOS_INTERNAL_ENABLE_CXX11 := $(strip $(shell echo $(KOKKOS_CXX_STANDARD) | gr # Check for external libraries KOKKOS_INTERNAL_USE_HWLOC := $(strip $(shell echo $(KOKKOS_USE_TPLS) | grep "hwloc" | wc -l)) KOKKOS_INTERNAL_USE_LIBRT := $(strip $(shell echo $(KOKKOS_USE_TPLS) | grep "librt" | wc -l)) +KOKKOS_INTERNAL_USE_MEMKIND := $(strip $(shell echo $(KOKKOS_USE_TPLS) | grep "experimental_memkind" | wc -l)) # Check for advanced settings +KOKKOS_INTERNAL_OPT_RANGE_AGGRESSIVE_VECTORIZATION := $(strip $(shell echo $(KOKKOS_OPTIONS) | grep "aggressive_vectorization" | wc -l)) KOKKOS_INTERNAL_CUDA_USE_LDG := $(strip $(shell echo $(KOKKOS_CUDA_OPTIONS) | grep "use_ldg" | wc -l)) KOKKOS_INTERNAL_CUDA_USE_UVM := $(strip $(shell echo $(KOKKOS_CUDA_OPTIONS) | grep "force_uvm" | wc -l)) 
KOKKOS_INTERNAL_CUDA_USE_RELOC := $(strip $(shell echo $(KOKKOS_CUDA_OPTIONS) | grep "rdc" | wc -l)) @@ -50,10 +50,11 @@ ifeq ($(KOKKOS_INTERNAL_USE_PTHREADS), 0) endif endif -KOKKOS_INTERNAL_COMPILER_PGI := $(shell $(CXX) --version 2>&1 | grep PGI | wc -l) -KOKKOS_INTERNAL_COMPILER_XL := $(shell $(CXX) -qversion 2>&1 | grep XL | wc -l) -KOKKOS_INTERNAL_COMPILER_CRAY := $(shell $(CXX) -craype-verbose 2>&1 | grep "CC-" | wc -l) -KOKKOS_INTERNAL_OS_CYGWIN := $(shell uname | grep CYGWIN | wc -l) +KOKKOS_INTERNAL_COMPILER_INTEL := $(shell $(CXX) --version 2>&1 | grep "Intel Corporation" | wc -l) +KOKKOS_INTERNAL_COMPILER_PGI := $(shell $(CXX) --version 2>&1 | grep PGI | wc -l) +KOKKOS_INTERNAL_COMPILER_XL := $(shell $(CXX) -qversion 2>&1 | grep XL | wc -l) +KOKKOS_INTERNAL_COMPILER_CRAY := $(shell $(CXX) -craype-verbose 2>&1 | grep "CC-" | wc -l) +KOKKOS_INTERNAL_OS_CYGWIN := $(shell uname | grep CYGWIN | wc -l) ifeq ($(KOKKOS_INTERNAL_COMPILER_PGI), 1) KOKKOS_INTERNAL_OPENMP_FLAG := -mp @@ -93,8 +94,10 @@ KOKKOS_INTERNAL_USE_CUDA := $(strip $(shell echo $(KOKKOS_DEVICES) | grep Cuda | KOKKOS_INTERNAL_USE_ARCH_KNC := $(strip $(shell echo $(KOKKOS_ARCH) | grep KNC | wc -l)) KOKKOS_INTERNAL_USE_ARCH_SNB := $(strip $(shell echo $(KOKKOS_ARCH) | grep SNB | wc -l)) KOKKOS_INTERNAL_USE_ARCH_HSW := $(strip $(shell echo $(KOKKOS_ARCH) | grep HSW | wc -l)) +KOKKOS_INTERNAL_USE_ARCH_KNL := $(strip $(shell echo $(KOKKOS_ARCH) | grep KNL | wc -l)) #NVIDIA based +NVCC_WRAPPER := $(KOKKOS_PATH)/config/nvcc_wrapper KOKKOS_INTERNAL_USE_ARCH_KEPLER30 := $(strip $(shell echo $(KOKKOS_ARCH) | grep Kepler30 | wc -l)) KOKKOS_INTERNAL_USE_ARCH_KEPLER32 := $(strip $(shell echo $(KOKKOS_ARCH) | grep Kepler32 | wc -l)) KOKKOS_INTERNAL_USE_ARCH_KEPLER35 := $(strip $(shell echo $(KOKKOS_ARCH) | grep Kepler35 | wc -l)) @@ -135,8 +138,9 @@ KOKKOS_INTERNAL_USE_ARCH_IBM := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_ KOKKOS_INTERNAL_USE_ARCH_AMDAVX := $(strip $(shell echo $(KOKKOS_ARCH) | grep 
AMDAVX | wc -l)) #Any AVX? -KOKKOS_INTERNAL_USE_ARCH_AVX := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_SNB)+$(KOKKOS_INTERNAL_USE_ARCH_AMDAVX) | bc )) -KOKKOS_INTERNAL_USE_ARCH_AVX2 := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_HSW) | bc )) +KOKKOS_INTERNAL_USE_ARCH_AVX := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_SNB)+$(KOKKOS_INTERNAL_USE_ARCH_AMDAVX) | bc )) +KOKKOS_INTERNAL_USE_ARCH_AVX2 := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_HSW) | bc )) +KOKKOS_INTERNAL_USE_ARCH_AVX512MIC := $(strip $(shell echo $(KOKKOS_INTERNAL_USE_ARCH_KNL) | bc )) #Incompatible flags? KOKKOS_INTERNAL_USE_ARCH_MULTIHOST := $(strip $(shell echo "$(KOKKOS_INTERNAL_USE_ARCH_AVX)+$(KOKKOS_INTERNAL_USE_ARCH_AVX2)+$(KOKKOS_INTERNAL_USE_ARCH_KNC)+$(KOKKOS_INTERNAL_USE_ARCH_IBM)+$(KOKKOS_INTERNAL_USE_ARCH_AMDAVX)+$(KOKKOS_INTERNAL_USE_ARCH_ARMV80)>1" | bc )) @@ -225,6 +229,19 @@ ifeq ($(KOKKOS_INTERNAL_USE_LIBRT), 1) KOKKOS_LIBS += -lrt endif +ifeq ($(KOKKOS_INTERNAL_USE_MEMKIND), 1) + KOKKOS_CPPFLAGS += -I$(MEMKIND_PATH)/include + KOKKOS_LDFLAGS += -L$(MEMKIND_PATH)/lib + KOKKOS_LIBS += -lmemkind + tmp := $(shell echo "\#define KOKKOS_HAVE_HBWSPACE 1" >> KokkosCore_config.tmp ) +endif + +tmp := $(shell echo "/* Optimization Settings */" >> KokkosCore_config.tmp) + +ifeq ($(KOKKOS_INTERNAL_OPT_RANGE_AGGRESSIVE_VECTORIZATION), 1) + tmp := $(shell echo "\#define KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION 1" >> KokkosCore_config.tmp ) +endif + tmp := $(shell echo "/* Cuda Settings */" >> KokkosCore_config.tmp) ifeq ($(KOKKOS_INTERNAL_CUDA_USE_LDG), 1) @@ -265,8 +282,41 @@ ifeq ($(KOKKOS_INTERNAL_USE_ARCH_POWER8), 1) endif ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AVX2), 1) - KOKKOS_CXXFLAGS += -march=core-avx2 - KOKKOS_LDFLAGS += -march=core-avx2 + ifeq ($(KOKKOS_INTERNAL_COMPILER_INTEL), 1) + KOKKOS_CXXFLAGS += -xCORE-AVX2 + KOKKOS_LDFLAGS += -xCORE-AVX2 + else + ifeq ($(KOKKOS_INTERNAL_COMPILER_CRAY), 1) + + else + ifeq ($(KOKKOS_INTERNAL_COMPILER_PGI), 1) + + else + # Assume that 
this is a really a GNU compiler + KOKKOS_CXXFLAGS += -march=core-avx2 + KOKKOS_LDFLAGS += -march=core-avx2 + endif + endif + endif +endif + +ifeq ($(KOKKOS_INTERNAL_USE_ARCH_AVX512MIC), 1) + ifeq ($(KOKKOS_INTERNAL_COMPILER_INTEL), 1) + KOKKOS_CXXFLAGS += -xMIC-AVX512 + KOKKOS_LDFLAGS += -xMIC-AVX512 + else + ifeq ($(KOKKOS_INTERNAL_COMPILER_CRAY), 1) + + else + ifeq ($(KOKKOS_INTERNAL_COMPILER_PGI), 1) + + else + # Asssume that this is really a GNU compiler + KOKKOS_CXXFLAGS += -march=knl + KOKKOS_LDFLAGS += -march=knl + endif + endif + endif endif ifeq ($(KOKKOS_INTERNAL_USE_ARCH_KNC), 1) diff --git a/lib/kokkos/Makefile.targets b/lib/kokkos/Makefile.targets index 009adb42c0..7574aeb338 100644 --- a/lib/kokkos/Makefile.targets +++ b/lib/kokkos/Makefile.targets @@ -55,3 +55,8 @@ Kokkos_OpenMPexec.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/OpenMP/Kokkos $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/OpenMP/Kokkos_OpenMPexec.cpp endif +Kokkos_HBWSpace.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_HBWSpace.cpp + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_HBWSpace.cpp +Kokkos_HBWAllocators.o: $(KOKKOS_CPP_DEPENDS) $(KOKKOS_PATH)/core/src/impl/Kokkos_HBWAllocators.cpp + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) -c $(KOKKOS_PATH)/core/src/impl/Kokkos_HBWAllocators.cpp + diff --git a/lib/kokkos/README b/lib/kokkos/README index 85bd0142bf..904e39abf5 100644 --- a/lib/kokkos/README +++ b/lib/kokkos/README @@ -20,6 +20,13 @@ GTC 2015: A programming guide can be found under doc/Kokkos_PG.pdf. This is an initial version and feedback is greatly appreciated. +A separate repository with extensive tutorial material can be found under +https://github.com/kokkos/kokkos-tutorials. + +If you have a patch to contribute please feel free to issue a pull request against +the develop branch. For major contributions it is better to contact us first +for guidance. 
+ For questions please send an email to kokkos-users@software.sandia.gov @@ -43,6 +50,7 @@ Primary tested compilers are: Secondary tested compilers are: CUDA 6.5 (with gcc 4.7.2) CUDA 7.0 (with gcc 4.7.2) + CUDA 7.5 (with gcc 4.7.2) Other compilers working: PGI 15.4 diff --git a/lib/kokkos/algorithms/CMakeLists.txt b/lib/kokkos/algorithms/CMakeLists.txt new file mode 100644 index 0000000000..7853184a54 --- /dev/null +++ b/lib/kokkos/algorithms/CMakeLists.txt @@ -0,0 +1,10 @@ + + +TRIBITS_SUBPACKAGE(Algorithms) + +ADD_SUBDIRECTORY(src) + +TRIBITS_ADD_TEST_DIRECTORIES(unit_tests) +#TRIBITS_ADD_TEST_DIRECTORIES(performance_tests) + +TRIBITS_SUBPACKAGE_POSTPROCESS() diff --git a/lib/kokkos/algorithms/cmake/Dependencies.cmake b/lib/kokkos/algorithms/cmake/Dependencies.cmake new file mode 100644 index 0000000000..1d71d8af34 --- /dev/null +++ b/lib/kokkos/algorithms/cmake/Dependencies.cmake @@ -0,0 +1,5 @@ +TRIBITS_PACKAGE_DEFINE_DEPENDENCIES( + LIB_REQUIRED_PACKAGES KokkosCore + LIB_OPTIONAL_TPLS Pthread CUDA HWLOC + TEST_OPTIONAL_TPLS CUSPARSE + ) diff --git a/lib/kokkos/algorithms/cmake/KokkosAlgorithms_config.h.in b/lib/kokkos/algorithms/cmake/KokkosAlgorithms_config.h.in new file mode 100644 index 0000000000..67334b70f3 --- /dev/null +++ b/lib/kokkos/algorithms/cmake/KokkosAlgorithms_config.h.in @@ -0,0 +1,4 @@ +#ifndef KOKKOS_ALGORITHMS_CONFIG_H +#define KOKKOS_ALGORITHMS_CONFIG_H + +#endif diff --git a/lib/kokkos/algorithms/src/CMakeLists.txt b/lib/kokkos/algorithms/src/CMakeLists.txt new file mode 100644 index 0000000000..dfbf3323c2 --- /dev/null +++ b/lib/kokkos/algorithms/src/CMakeLists.txt @@ -0,0 +1,21 @@ + +TRIBITS_CONFIGURE_FILE(${PACKAGE_NAME}_config.h) + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +#----------------------------------------------------------------------------- + +FILE(GLOB HEADERS *.hpp) +FILE(GLOB SOURCES *.cpp) +LIST(APPEND HEADERS 
${CMAKE_CURRENT_BINARY_DIR}/${PACKAGE_NAME}_config.h) + +#----------------------------------------------------------------------------- + +TRIBITS_ADD_LIBRARY( + kokkosalgorithms + HEADERS ${HEADERS} + SOURCES ${SOURCES} + DEPLIBS + ) + diff --git a/lib/kokkos/algorithms/src/Kokkos_Random.hpp b/lib/kokkos/algorithms/src/Kokkos_Random.hpp index 17f5e073c3..192b1d64f8 100644 --- a/lib/kokkos/algorithms/src/Kokkos_Random.hpp +++ b/lib/kokkos/algorithms/src/Kokkos_Random.hpp @@ -45,7 +45,7 @@ #define KOKKOS_RANDOM_HPP #include -//#include +#include #include #include #include @@ -475,6 +475,58 @@ namespace Kokkos { }; + template + struct rand > { + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex max () { + return ::Kokkos::complex (1.0, 1.0); + } + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex draw (Generator& gen) { + const float re = gen.frand (); + const float im = gen.frand (); + return ::Kokkos::complex (re, im); + } + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex draw (Generator& gen, const ::Kokkos::complex& range) { + const float re = gen.frand (real (range)); + const float im = gen.frand (imag (range)); + return ::Kokkos::complex (re, im); + } + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex draw (Generator& gen, const ::Kokkos::complex& start, const ::Kokkos::complex& end) { + const float re = gen.frand (real (start), real (end)); + const float im = gen.frand (imag (start), imag (end)); + return ::Kokkos::complex (re, im); + } + }; + + template + struct rand > { + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex max () { + return ::Kokkos::complex (1.0, 1.0); + } + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex draw (Generator& gen) { + const double re = gen.drand (); + const double im = gen.drand (); + return ::Kokkos::complex (re, im); + } + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex draw (Generator& gen, const ::Kokkos::complex& range) { + const double re = gen.drand (real (range)); + const double im = gen.drand (imag (range)); + 
return ::Kokkos::complex (re, im); + } + KOKKOS_INLINE_FUNCTION + static ::Kokkos::complex draw (Generator& gen, const ::Kokkos::complex& start, const ::Kokkos::complex& end) { + const double re = gen.drand (real (start), real (end)); + const double im = gen.drand (imag (start), imag (end)); + return ::Kokkos::complex (re, im); + } + }; + template class Random_XorShift64_Pool; diff --git a/lib/kokkos/algorithms/unit_tests/CMakeLists.txt b/lib/kokkos/algorithms/unit_tests/CMakeLists.txt new file mode 100644 index 0000000000..654104b44e --- /dev/null +++ b/lib/kokkos/algorithms/unit_tests/CMakeLists.txt @@ -0,0 +1,38 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../src ) + +SET(SOURCES + UnitTestMain.cpp + TestCuda.cpp + ) + +SET(LIBRARIES kokkoscore) + +IF(Kokkos_ENABLE_OpenMP) + LIST( APPEND SOURCES + TestOpenMP.cpp + ) +ENDIF() + +IF(Kokkos_ENABLE_Serial) + LIST( APPEND SOURCES + TestSerial.cpp + ) +ENDIF() + +IF(Kokkos_ENABLE_Pthread) + LIST( APPEND SOURCES + TestThreads.cpp + ) +ENDIF() + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest + SOURCES ${SOURCES} + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) diff --git a/lib/kokkos/algorithms/unit_tests/Makefile b/lib/kokkos/algorithms/unit_tests/Makefile index 5fc94ac0f8..5d79364c52 100644 --- a/lib/kokkos/algorithms/unit_tests/Makefile +++ b/lib/kokkos/algorithms/unit_tests/Makefile @@ -6,12 +6,12 @@ vpath %.cpp ${KOKKOS_PATH}/algorithms/unit_tests default: build_all echo "End Build" - + include $(KOKKOS_PATH)/Makefile.kokkos ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) - CXX = nvcc_wrapper + CXX = $(NVCC_WRAPPER) CXXFLAGS ?= -O3 LINK = $(CXX) LDFLAGS ?= -lpthread @@ -56,7 +56,7 @@ KokkosAlgorithms_UnitTest_Cuda: $(OBJ_CUDA) $(KOKKOS_LINK_DEPENDS) KokkosAlgorithms_UnitTest_Threads: $(OBJ_THREADS) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) 
$(EXTRA_PATH) $(OBJ_THREADS) $(KOKKOS_LIBS) $(LIB) -o KokkosAlgorithms_UnitTest_Threads - + KokkosAlgorithms_UnitTest_OpenMP: $(OBJ_OPENMP) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_OPENMP) $(KOKKOS_LIBS) $(LIB) -o KokkosAlgorithms_UnitTest_OpenMP @@ -74,11 +74,11 @@ test-openmp: KokkosAlgorithms_UnitTest_OpenMP test-serial: KokkosAlgorithms_UnitTest_Serial ./KokkosAlgorithms_UnitTest_Serial - + build_all: $(TARGETS) test: $(TEST_TARGETS) - + clean: kokkos-clean rm -f *.o $(TARGETS) diff --git a/lib/kokkos/cmake/Dependencies.cmake b/lib/kokkos/cmake/Dependencies.cmake new file mode 100644 index 0000000000..8c51eab4d7 --- /dev/null +++ b/lib/kokkos/cmake/Dependencies.cmake @@ -0,0 +1,10 @@ +TRIBITS_PACKAGE_DEFINE_DEPENDENCIES( + SUBPACKAGES_DIRS_CLASSIFICATIONS_OPTREQS + #SubPackageName Directory Class Req/Opt + # + # New Kokkos subpackages: + Core core PS REQUIRED + Containers containers PS OPTIONAL + Algorithms algorithms PS OPTIONAL + Example example EX OPTIONAL + ) diff --git a/lib/kokkos/cmake/tpls/FindTPLCUSPARSE.cmake b/lib/kokkos/cmake/tpls/FindTPLCUSPARSE.cmake new file mode 100644 index 0000000000..aad1e2bad7 --- /dev/null +++ b/lib/kokkos/cmake/tpls/FindTPLCUSPARSE.cmake @@ -0,0 +1,75 @@ +# @HEADER +# ************************************************************************ +# +# Trilinos: An Object-Oriented Solver Framework +# Copyright (2001) Sandia Corporation +# +# +# Copyright (2001) Sandia Corporation. Under the terms of Contract +# DE-AC04-94AL85000, there is a non-exclusive license for use of this +# work by or on behalf of the U.S. Government. Export of this program +# may require a license from the United States Government. +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# NOTICE: The United States Government is granted for itself and others +# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide +# license in this data to reproduce, prepare derivative works, and +# perform publicly and display publicly. Beginning five (5) years from +# July 25, 2001, the United States Government is granted for itself and +# others acting on its behalf a paid-up, nonexclusive, irrevocable +# worldwide license in this data to reproduce, prepare derivative works, +# distribute copies to the public, perform publicly and display +# publicly, and to permit others to do so. 
+# +# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT +# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES +# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR +# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY +# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS +# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS. +# +# ************************************************************************ +# @HEADER + +# Check for CUDA support + +IF (NOT TPL_ENABLE_CUDA OR CUDA_VERSION VERSION_LESS "4.1") + MESSAGE(FATAL_ERROR "\nCUSPARSE: did not find acceptable version of CUDA libraries (4.1 or greater)") +ELSE() + IF(CMAKE_VERSION VERSION_LESS "2.8.8") + # FindCUDA before CMake 2.8.8 does not find cusparse library; therefore, we must + find_library(CUDA_cusparse_LIBRARY + cusparse + HINTS ${CUDA_TOOLKIT_ROOT_DIR}/lib + ) + IF(CUDA_cusparse_LIBRARY STREQUAL "CUDA_cusparse_LIBRARY-NOTFOUND") + MESSAGE(FATAL_ERROR "\nCUSPARSE: could not find cuspasre library.") + ENDIF() + ENDIF(CMAKE_VERSION VERSION_LESS "2.8.8") + GLOBAL_SET(TPL_CUSPARSE_LIBRARY_DIRS) + GLOBAL_SET(TPL_CUSPARSE_INCLUDE_DIRS ${TPL_CUDA_INCLUDE_DIRS}) + GLOBAL_SET(TPL_CUSPARSE_LIBRARIES ${CUDA_cusparse_LIBRARY}) +ENDIF() + diff --git a/lib/kokkos/cmake/tpls/FindTPLHWLOC.cmake b/lib/kokkos/cmake/tpls/FindTPLHWLOC.cmake new file mode 100644 index 0000000000..715b3e9bde --- /dev/null +++ b/lib/kokkos/cmake/tpls/FindTPLHWLOC.cmake @@ -0,0 +1,71 @@ +# @HEADER +# ************************************************************************ +# +# Trilinos: An Object-Oriented Solver Framework +# Copyright (2001) Sandia Corporation +# +# +# Copyright (2001) Sandia Corporation. Under the terms of Contract +# DE-AC04-94AL85000, there is a non-exclusive license for use of this +# work by or on behalf of the U.S. Government. Export of this program +# may require a license from the United States Government. +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# NOTICE: The United States Government is granted for itself and others +# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide +# license in this data to reproduce, prepare derivative works, and +# perform publicly and display publicly. Beginning five (5) years from +# July 25, 2001, the United States Government is granted for itself and +# others acting on its behalf a paid-up, nonexclusive, irrevocable +# worldwide license in this data to reproduce, prepare derivative works, +# distribute copies to the public, perform publicly and display +# publicly, and to permit others to do so. 
+# +# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT +# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES +# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR +# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY +# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS +# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS. +# +# ************************************************************************ +# @HEADER + + +#----------------------------------------------------------------------------- +# Hardware locality detection and control library. +# +# Acquisition information: +# Date checked: November 2011 +# Checked by: H. Carter Edwards +# Source: http://www.open-mpi.org/projects/hwloc/ +# Version: 1.3 +# + +TRIBITS_TPL_FIND_INCLUDE_DIRS_AND_LIBRARIES( HWLOC + REQUIRED_HEADERS hwloc.h + REQUIRED_LIBS_NAMES "hwloc" + ) + diff --git a/lib/kokkos/cmake/tpls/FindTPLPthread.cmake b/lib/kokkos/cmake/tpls/FindTPLPthread.cmake new file mode 100644 index 0000000000..fc401d7543 --- /dev/null +++ b/lib/kokkos/cmake/tpls/FindTPLPthread.cmake @@ -0,0 +1,82 @@ +# @HEADER +# ************************************************************************ +# +# Trilinos: An Object-Oriented Solver Framework +# Copyright (2001) Sandia Corporation +# +# +# Copyright (2001) Sandia Corporation. Under the terms of Contract +# DE-AC04-94AL85000, there is a non-exclusive license for use of this +# work by or on behalf of the U.S. Government. Export of this program +# may require a license from the United States Government. +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# NOTICE: The United States Government is granted for itself and others +# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide +# license in this data to reproduce, prepare derivative works, and +# perform publicly and display publicly. Beginning five (5) years from +# July 25, 2001, the United States Government is granted for itself and +# others acting on its behalf a paid-up, nonexclusive, irrevocable +# worldwide license in this data to reproduce, prepare derivative works, +# distribute copies to the public, perform publicly and display +# publicly, and to permit others to do so. +# +# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT +# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES +# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR +# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY +# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS +# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS. 
+# +# ************************************************************************ +# @HEADER + + +SET(USE_THREADS FALSE) + +IF(NOT TPL_Pthread_INCLUDE_DIRS AND NOT TPL_Pthread_LIBRARY_DIRS AND NOT TPL_Pthread_LIBRARIES) + # Use CMake's Thread finder since it is a bit smarter in determining + # whether pthreads is already built into the compiler and doesn't need + # a library to link. + FIND_PACKAGE(Threads) + #If Threads found a copy of pthreads make sure it is one of the cases the tribits + #tpl system cannot handle. + IF(Threads_FOUND AND CMAKE_USE_PTHREADS_INIT) + IF(CMAKE_THREAD_LIBS_INIT STREQUAL "" OR CMAKE_THREAD_LIBS_INIT STREQUAL "-pthread") + SET(USE_THREADS TRUE) + ENDIF() + ENDIF() +ENDIF() + +IF(USE_THREADS) + SET(TPL_Pthread_INCLUDE_DIRS "") + SET(TPL_Pthread_LIBRARIES "${CMAKE_THREAD_LIBS_INIT}") + SET(TPL_Pthread_LIBRARY_DIRS "") +ELSE() + TRIBITS_TPL_FIND_INCLUDE_DIRS_AND_LIBRARIES( Pthread + REQUIRED_HEADERS pthread.h + REQUIRED_LIBS_NAMES pthread + ) +ENDIF() diff --git a/lib/kokkos/cmake/tpls/FindTPLQTHREAD.cmake b/lib/kokkos/cmake/tpls/FindTPLQTHREAD.cmake new file mode 100644 index 0000000000..994b72b200 --- /dev/null +++ b/lib/kokkos/cmake/tpls/FindTPLQTHREAD.cmake @@ -0,0 +1,70 @@ +# @HEADER +# ************************************************************************ +# +# Trilinos: An Object-Oriented Solver Framework +# Copyright (2001) Sandia Corporation +# +# +# Copyright (2001) Sandia Corporation. Under the terms of Contract +# DE-AC04-94AL85000, there is a non-exclusive license for use of this +# work by or on behalf of the U.S. Government. Export of this program +# may require a license from the United States Government. +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of the Corporation nor the names of the +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# NOTICE: The United States Government is granted for itself and others +# acting on its behalf a paid-up, nonexclusive, irrevocable worldwide +# license in this data to reproduce, prepare derivative works, and +# perform publicly and display publicly. Beginning five (5) years from +# July 25, 2001, the United States Government is granted for itself and +# others acting on its behalf a paid-up, nonexclusive, irrevocable +# worldwide license in this data to reproduce, prepare derivative works, +# distribute copies to the public, perform publicly and display +# publicly, and to permit others to do so. 
+# +# NEITHER THE UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT +# OF ENERGY, NOR SANDIA CORPORATION, NOR ANY OF THEIR EMPLOYEES, MAKES +# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR +# RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY +# INFORMATION, APPARATUS, PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS +# THAT ITS USE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS. +# +# ************************************************************************ +# @HEADER + + +#----------------------------------------------------------------------------- +# Hardware locality detection and control library. +# +# Acquisition information: +# Date checked: July 2014 +# Checked by: H. Carter Edwards +# Source: https://code.google.com/p/qthreads +# + +TRIBITS_TPL_FIND_INCLUDE_DIRS_AND_LIBRARIES( QTHREAD + REQUIRED_HEADERS qthread.h + REQUIRED_LIBS_NAMES "qthread" + ) + diff --git a/lib/kokkos/config/nvcc_wrapper b/lib/kokkos/config/nvcc_wrapper index 63e0ef50a7..058911929b 100755 --- a/lib/kokkos/config/nvcc_wrapper +++ b/lib/kokkos/config/nvcc_wrapper @@ -23,36 +23,72 @@ default_arch="sm_35" # # The default C++ compiler. 
# -default_compiler=${NVCC_WRAPPER_DEFAULT_COMPILER:-"g++"} -#default_compiler="icpc" -#default_compiler="/usr/local/gcc/4.8.3/bin/g++" -#default_compiler="/usr/local/gcc/4.9.1/bin/g++" +host_compiler=${NVCC_WRAPPER_DEFAULT_COMPILER:-"g++"} +#host_compiler="icpc" +#host_compiler="/usr/local/gcc/4.8.3/bin/g++" +#host_compiler="/usr/local/gcc/4.9.1/bin/g++" # # Internal variables # + +# C++ files cpp_files="" + +# Host compiler arguments xcompiler_args="" -cuda_arg="" + +# Cuda (NVCC) only arguments +cuda_args="" + +# Arguments for both NVCC and Host compiler +shared_args="" + +# Linker arguments xlinker_args="" + +# Object files passable to NVCC object_files="" + +# Link objects for the host linker only object_files_xlinker="" -first_host_option=1 + +# Does the User set the architecture arch_set=0 + +# Does the user overwrite the host compiler ccbin_set=0 -nvcc_error_code=0 + +#Error code of compilation +error_code=0 + +# Do a dry run without actually compiling dry_run=0 + +# Skip NVCC compilation and use host compiler directly +host_only=0 + +# Enable workaround for CUDA 6.5 for pragma ident replace_pragma_ident=0 +# Mark first host compiler argument +first_xcompiler_arg=1 + +temp_dir=${TMPDIR:-/tmp} + #echo "Arguments: $# $@" while [ $# -gt 0 ] do case $1 in #show the executed command - --show) + --show|--nvcc-wrapper-show) dry_run=1 ;; + #run host compilation only + --host-only) + host_only=1 + ;; #replace '#pragma ident' with '#ident' this is needed to compile OpenMPI due to a configure script bug and a non standardized behaviour of pragma with macros --replace-pragma-ident) replace_pragma_ident=1 @@ -61,22 +97,31 @@ do *.cpp|*.cxx|*.cc|*.C|*.c++|*.cu) cpp_files="$cpp_files $1" ;; + #Handle shared args (valid for both nvcc and the host compiler) + -O*|-D*|-c|-I*|-L*|-l*|-g|--help|--version|-E|-M|-shared) + shared_args="$shared_args $1" + ;; + #Handle shared args that have an argument + -o) + shared_args="$shared_args $1 $2" + shift + ;; #Handle known nvcc args - 
-O*|-D*|-gencode*|-c|-I*|-L*|-l*|-g|--help|--version|--dryrun|--verbose|--keep-dir|-E|-M|-G|--relocatable-device-code*|-shared|-lineinfo|-expt-extended-lambda|--resource-usage) + -gencode*|--dryrun|--verbose|--keep|--keep-dir*|-G|--relocatable-device-code*|-lineinfo|-expt-extended-lambda|--resource-usage) cuda_args="$cuda_args $1" ;; + #Handle known nvcc args that have an argument + -rdc|-maxrregcount|--default-stream) + cuda_args="$cuda_args $1 $2" + shift + ;; #Handle c++11 setting --std=c++11|-std=c++11) - cuda_args="$cuda_args $1" + shared_args="$shared_args $1" ;; #strip of -std=c++98 due to nvcc warnings and Tribits will place both -std=c++11 and -std=c++98 -std=c++98|--std=c++98) ;; - #Handle known nvcc args that have an argument - -o|-rdc|-maxrregcount|--default-stream) - cuda_args="$cuda_args $1 $2" - shift - ;; #strip of pedantic because it produces endless warnings about #LINE added by the preprocessor -pedantic|-Wpedantic|-ansi) ;; @@ -86,7 +131,12 @@ do #strip of "-x cu" because we add that -x) if [[ $2 != "cu" ]]; then - xcompiler_args="$xcompiler_args,-x,$2" + if [ $first_xcompiler_arg -eq 1 ]; then + xcompiler_args="-x,$2" + first_xcompiler_arg=0 + else + xcompiler_args="$xcompiler_args,-x,$2" + fi fi shift ;; @@ -94,6 +144,7 @@ do -ccbin) cuda_args="$cuda_args $1 $2" ccbin_set=1 + host_compiler=$2 shift ;; #Handle -arch argument (if its not set use a default @@ -109,24 +160,25 @@ do #Handle args that should be sent to the linker -Wl*) xlinker_args="$xlinker_args -Xlinker ${1:4:${#1}}" + host_linker_args="$host_linker_args ${1:4:${#1}}" ;; #Handle object files: -x cu applies to all input files, so give them to linker, except if only linking *.a|*.so|*.o|*.obj) object_files="$object_files $1" object_files_xlinker="$object_files_xlinker -Xlinker $1" ;; - #Handle object files: -x cu applies to all input files, so give them to linker, except if only linking + #Handle object files which always need to use "-Xlinker": -x cu applies to all input files, so 
give them to linker, except if only linking *.so.*|*.dylib) - object_files_xlinker="$object_files_xlinker -Xlinker $1" object_files="$object_files -Xlinker $1" + object_files_xlinker="$object_files_xlinker -Xlinker $1" ;; #All other args are sent to the host compiler *) - if [ $first_host_option -eq 0 ]; then + if [ $first_xcompiler_arg -eq 1 ]; then + xcompiler_args=$1 + first_xcompiler_arg=0 + else xcompiler_args="$xcompiler_args,$1" - else - xcompiler_args="-Xcompiler $1" - first_host_option=0 fi ;; esac @@ -136,7 +188,7 @@ done #Add default host compiler if necessary if [ $ccbin_set -ne 1 ]; then - cuda_args="$cuda_args -ccbin $default_compiler" + cuda_args="$cuda_args -ccbin $host_compiler" fi #Add architecture command @@ -145,7 +197,13 @@ if [ $arch_set -ne 1 ]; then fi #Compose compilation command -command="nvcc $cuda_args $xlinker_args $xcompiler_args" +nvcc_command="nvcc $cuda_args $shared_args $xlinker_args" +if [ $first_xcompiler_arg -eq 0 ]; then + nvcc_command="$nvcc_command -Xcompiler $xcompiler_args" +fi + +#Compose host only command +host_command="$host_compiler $shared_args $xcompiler_args $host_linker_args" #nvcc does not accept '#pragma ident SOME_MACRO_STRING' but it does accept '#ident SOME_MACRO_STRING' if [ $replace_pragma_ident -eq 1 ]; then @@ -155,31 +213,45 @@ if [ $replace_pragma_ident -eq 1 ]; then var=`grep pragma ${file} | grep ident | grep "#"` if [ "${#var}" -gt 0 ] then - sed 's/#[\ \t]*pragma[\ \t]*ident/#ident/g' $file > /tmp/nvcc_wrapper_tmp_$file - cpp_files2="$cpp_files2 /tmp/nvcc_wrapper_tmp_$file" + sed 's/#[\ \t]*pragma[\ \t]*ident/#ident/g' $file > $temp_dir/nvcc_wrapper_tmp_$file + cpp_files2="$cpp_files2 $temp_dir/nvcc_wrapper_tmp_$file" else cpp_files2="$cpp_files2 $file" fi done cpp_files=$cpp_files2 - echo $cpp_files + #echo $cpp_files fi if [ "$cpp_files" ]; then - command="$command $object_files_xlinker -x cu $cpp_files" + nvcc_command="$nvcc_command $object_files_xlinker -x cu $cpp_files" else - command="$command 
$object_files" + nvcc_command="$nvcc_command $object_files" +fi + +if [ "$cpp_files" ]; then + host_command="$host_command $object_files $cpp_files" +else + host_command="$host_command $object_files" fi #Print command for dryrun if [ $dry_run -eq 1 ]; then - echo $command + if [ $host_only -eq 1 ]; then + echo $host_command + else + echo $nvcc_command + fi exit 0 fi #Run compilation command -$command -nvcc_error_code=$? +if [ $host_only -eq 1 ]; then + $host_command +else + $nvcc_command +fi +error_code=$? #Report error code -exit $nvcc_error_code +exit $error_code diff --git a/lib/kokkos/config/test_all_sandia b/lib/kokkos/config/test_all_sandia index 7d52039bea..659f14066b 100755 --- a/lib/kokkos/config/test_all_sandia +++ b/lib/kokkos/config/test_all_sandia @@ -6,8 +6,6 @@ set -o pipefail -COMPILER_ROOT="/home/projects/x86-64" - GCC_BUILD_LIST="OpenMP,Pthread,Serial,OpenMP_Serial,Pthread_Serial" INTEL_BUILD_LIST="OpenMP,Pthread,Serial,OpenMP_Serial,Pthread_Serial" CLANG_BUILD_LIST="Pthread,Serial,Pthread_Serial" @@ -18,24 +16,17 @@ CLANG_WARNING_FLAGS="-Wall,-Wshadow,-pedantic,-Werror,-Wsign-compare,-Wtype-limi INTEL_WARNING_FLAGS="-Wall,-Wshadow,-pedantic,-Werror,-Wsign-compare,-Wtype-limits,-Wuninitialized" CUDA_WARNING_FLAGS="" -# Format: (compiler module-list build-list exe-name warning-flag) -COMPILERS=("gcc/4.7.2 gcc/4.7.2/base,hwloc/1.10.0/host/gnu/4.7.2 $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" - "gcc/4.8.4 gcc/4.9.2/base,hwloc/1.10.0/host/gnu/4.9.2 $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" - "gcc/4.9.2 gcc/4.9.2/base,hwloc/1.10.0/host/gnu/4.9.2 $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" - "gcc/5.1.0 gcc/5.1.0/base,hwloc/1.10.0/host/gnu/5.1.0 $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" - "intel/14.0.4 intel/14.0.4/base,hwloc/1.10.0/host/gnu/4.7.2 $INTEL_BUILD_LIST icpc $INTEL_WARNING_FLAGS" - "intel/15.0.2 intel/15.0.2/base,hwloc/1.10.0/host/gnu/4.7.2 $INTEL_BUILD_LIST icpc $INTEL_WARNING_FLAGS" - "clang/3.5.2 clang/3.5.2/base $CLANG_BUILD_LIST clang++ 
$CLANG_WARNING_FLAGS" - "clang/3.6.1 clang/3.6.1/base $CLANG_BUILD_LIST clang++ $CLANG_WARNING_FLAGS" - "cuda/6.5.14 cuda/6.5.14,nvcc-wrapper/gnu,gcc/4.7.2/base $CUDA_BUILD_LIST nvcc_wrapper $CUDA_WARNING_FLAGS" - "cuda/7.0.28 cuda/7.0.18,nvcc-wrapper/gnu,gcc/4.7.2/base $CUDA_BUILD_LIST nvcc_wrapper $CUDA_WARNING_FLAGS" - ) +BASE_MODULE_LIST="//base,hwloc/1.10.1///base" +CUDA_MODULE_LIST="/,gcc/4.7.2/base" export OMP_NUM_THREADS=4 -export SEMS_MODULE_ROOT=/projects/modulefiles -module use /home/projects/modulefiles -module use /projects/modulefiles/rhel6-x86_64/sems/compiler +declare -i NUM_RESULTS_TO_KEEP=7 + +RESULT_ROOT_PREFIX=TestAll + +source /projects/modulefiles/utils/sems-modules-init.sh +source /projects/modulefiles/utils/kokkos-modules-init.sh SCRIPT_KOKKOS_ROOT=$( cd "$( dirname "$0" )" && cd .. && pwd ) @@ -47,6 +38,9 @@ DEBUG=False ARGS="" CUSTOM_BUILD_LIST="" DRYRUN=False +BUILD_ONLY=False +declare -i NUM_JOBS_TO_RUN_IN_PARALLEL=3 +TEST_SCRIPT=False while [[ $# > 0 ]] do @@ -61,6 +55,15 @@ CUSTOM_BUILD_LIST="${key#*=}" --debug*) DEBUG=True ;; +--build-only*) +BUILD_ONLY=True +;; +--test-script*) +TEST_SCRIPT=True +;; +--num*) +NUM_JOBS_TO_RUN_IN_PARALLEL="${key#*=}" +;; --dry-run*) DRYRUN=True ;; @@ -69,7 +72,10 @@ echo "test_all_sandia :" echo "--kokkos-path=/Path/To/Kokkos: Path to the Kokkos root directory" echo " Defaults to root repo containing this script" echo "--debug: Run tests in debug. Defaults to False" +echo "--test-script: Test this script, not Kokkos" +echo "--num=N: Number of jobs to run in parallel " echo "--dry-run: Just print what would be executed" +echo "--build-only: Just do builds, don't run anything" echo "--build-list=BUILD,BUILD,BUILD..." 
echo " Provide a comma-separated list of builds instead of running all builds" echo " Valid items:" @@ -77,6 +83,18 @@ echo " OpenMP, Pthread, Serial, OpenMP_Serial, Pthread_Serial" echo " Cuda_OpenMP, Cuda_Pthread, Cuda_Serial" echo "" echo "ARGS: list of expressions matching compilers to test" +echo " supported compilers" +echo " gcc/4.7.2" +echo " gcc/4.8.4" +echo " gcc/4.9.2" +echo " gcc/5.1.0" +echo " intel/14.0.4" +echo " intel/15.0.2" +echo " clang/3.5.2" +echo " clang/3.6.1" +echo " cuda/6.5.14" +echo " cuda/7.0.28" +echo " cuda/7.5.18" echo "" echo "Examples:" echo " Run all tests" @@ -93,6 +111,10 @@ echo " % test_all_sandia --debug" echo "" echo " Run gcc/4.7.2 and only do OpenMP and OpenMP_Serial builds" echo " % test_all_sandia gcc/4.7.2 --build-list=OpenMP,OpenMP_Serial" +echo "" +echo "If you want to kill the tests, do:" +echo " hit ctrl-z" +echo " % kill -9 %1" echo exit 0 ;; @@ -104,7 +126,6 @@ esac shift done - # set kokkos path if [ -z "$KOKKOS_PATH" ]; then KOKKOS_PATH=$SCRIPT_KOKKOS_ROOT @@ -125,12 +146,26 @@ if [ -z "$ARGS" ]; then ARGS='?' 
fi +# Format: (compiler module-list build-list exe-name warning-flag) +COMPILERS=("gcc/4.7.2 $BASE_MODULE_LIST $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" + "gcc/4.8.4 $BASE_MODULE_LIST $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" + "gcc/4.9.2 $BASE_MODULE_LIST $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" + "gcc/5.1.0 $BASE_MODULE_LIST $GCC_BUILD_LIST g++ $GCC_WARNING_FLAGS" + "intel/14.0.4 $BASE_MODULE_LIST $INTEL_BUILD_LIST icpc $INTEL_WARNING_FLAGS" + "intel/15.0.2 $BASE_MODULE_LIST $INTEL_BUILD_LIST icpc $INTEL_WARNING_FLAGS" + "clang/3.5.2 $BASE_MODULE_LIST $CLANG_BUILD_LIST clang++ $CLANG_WARNING_FLAGS" + "clang/3.6.1 $BASE_MODULE_LIST $CLANG_BUILD_LIST clang++ $CLANG_WARNING_FLAGS" + "cuda/6.5.14 $CUDA_MODULE_LIST $CUDA_BUILD_LIST $KOKKOS_PATH/config/nvcc_wrapper $CUDA_WARNING_FLAGS" + "cuda/7.0.28 $CUDA_MODULE_LIST $CUDA_BUILD_LIST $KOKKOS_PATH/config/nvcc_wrapper $CUDA_WARNING_FLAGS" + "cuda/7.5.18 $CUDA_MODULE_LIST $CUDA_BUILD_LIST $KOKKOS_PATH/config/nvcc_wrapper $CUDA_WARNING_FLAGS" + ) + # Process args to figure out which compilers to test COMPILERS_TO_TEST="" for ARG in $ARGS; do for COMPILER_DATA in "${COMPILERS[@]}"; do - arr=($COMPILER_DATA) - COMPILER=${arr[0]} + ARR=($COMPILER_DATA) + COMPILER=${ARR[0]} if [[ "$COMPILER" = $ARG* ]]; then if [[ "$COMPILERS_TO_TEST" != *${COMPILER}* ]]; then COMPILERS_TO_TEST="$COMPILERS_TO_TEST $COMPILER" @@ -145,15 +180,28 @@ done # Functions # +# get_compiler_name +get_compiler_name() { + echo $1 | cut -d/ -f1 +} + +# get_compiler_version +get_compiler_version() { + echo $1 | cut -d/ -f2 +} + # Do not call directly get_compiler_data() { - compiler=$1 - item=$2 + local compiler=$1 + local item=$2 + local compiler_name=$(get_compiler_name $compiler) + local compiler_vers=$(get_compiler_version $compiler) + local compiler_data for compiler_data in "${COMPILERS[@]}" ; do - arr=($compiler_data) + local arr=($compiler_data) if [ "$compiler" = "${arr[0]}" ]; then - echo "${arr[$item]}" | tr , ' ' + echo "${arr[$item]}" | tr , ' ' | 
sed -e "s//$compiler_name/g" -e "s//$compiler_vers/g" return 0 fi done @@ -186,33 +234,60 @@ get_compiler_warning_flags() { run_cmd() { echo "RUNNING: $*" if [ "$DRYRUN" != "True" ]; then - eval "$*" + eval "$* 2>&1" fi } +# report_and_log_test_results report_and_log_test_result() { - if [ "$1" = "0" ]; then - echo "PASSED $2" - TEST_RESULTS="${TEST_RESULTS}\nPASSED $2" + # Use sane var names + local success=$1; local desc=$2; local phase=$3; + + if [ "$success" = "0" ]; then + echo " PASSED $desc" + touch $PASSED_DIR/$desc else - echo "FAILED $2" >&2 - TEST_RESULTS="${TEST_RESULTS}\nFAILED $2 ($3)" - NUM_FAILED+=1 + echo " FAILED $desc" >&2 + echo $phase > $FAILED_DIR/$desc + cat ${desc}.${phase}.log fi } +setup_env() { + local compiler=$1 + local compiler_modules=$(get_compiler_modules $compiler) + + module purge + + local mod + for mod in $compiler_modules; do + module load $mod 2>&1 + # It is ridiculously hard to check for the success of a loaded + # module. Module does not return error codes and piping to grep + # causes module to run in a subshell. 
+ module list 2>&1 | grep "$mod" >& /dev/null || return 1 + done + + return 0 +} + # single_build_and_test single_build_and_test() { # Use sane var names local compiler=$1; local build=$2; local build_type=$3; - cd $ROOT_DIR/$compiler + # set up env + mkdir -p $ROOT_DIR/$compiler/"${build}-$build_type" + cd $ROOT_DIR/$compiler/"${build}-$build_type" + local desc=$(echo "${compiler}-${build}-${build_type}" | sed 's:/:-:g') + setup_env $compiler >& ${desc}.configure.log || { report_and_log_test_result 1 ${desc} configure && return 0; } + # Set up flags local compiler_warning_flags=$(get_compiler_warning_flags $compiler) local compiler_exe=$(get_compiler_exe_name $compiler) if [[ "$build_type" = hwloc* ]]; then - local extra_args="--with-hwloc=$HWLOC_ROOT" + local extra_args=--with-hwloc=$(dirname $(dirname $(which hwloc-info))) fi if [[ "$build_type" = *debug* ]]; then @@ -222,36 +297,63 @@ single_build_and_test() { local cxxflags="-O3 $compiler_warning_flags" fi - local desc=$(echo "${compiler}-${build}-${build_type}" | sed 's:/:-:g') - echo " Doing build: $desc" - - mkdir "${build}-$build_type" - cd "${build}-$build_type" + if [[ "$compiler" == cuda* ]]; then + cxxflags="--keep --keep-dir=$(pwd) $cxxflags" + export TMPDIR=$(pwd) + fi # cxxflags="-DKOKKOS_USING_EXPERIMENTAL_VIEW $cxxflags" - run_cmd ${KOKKOS_PATH}/generate_makefile.bash --with-devices=$build --compiler=$(which $compiler_exe) --cxxflags=\"$cxxflags\" \"$extra_args\" 2>&1 | tee ${desc}.configure.log || { report_and_log_test_result 1 ${desc} configure && return 0; } - run_cmd make build-test 2>&1 | tee ${desc}.build.log || { report_and_log_test_result 1 ${desc} build && return 0; } - run_cmd make test 2>&1 | tee ${desc}.test.log || { report_and_log_test_result 1 ${desc} test && return 0; } + echo " Starting job $desc" + + if [ "$TEST_SCRIPT" = "True" ]; then + local rand=$[ 1 + $[ RANDOM % 10 ]] + sleep $rand + if [ $rand -gt 5 ]; then + run_cmd ls fake_problem >& ${desc}.configure.log || { 
report_and_log_test_result 1 $desc configure && return 0; } + fi + else + run_cmd ${KOKKOS_PATH}/generate_makefile.bash --with-devices=$build --compiler=$(which $compiler_exe) --cxxflags=\"$cxxflags\" $extra_args >& ${desc}.configure.log || { report_and_log_test_result 1 ${desc} configure && return 0; } + run_cmd make build-test >& ${desc}.build.log || { report_and_log_test_result 1 ${desc} build && return 0; } + if [[ "$BUILD_ONLY" == False ]]; then + run_cmd make test >& ${desc}.test.log || { report_and_log_test_result 1 ${desc} test && return 0; } + fi + fi + report_and_log_test_result 0 $desc + return 0 } -setup_env() { - local compiler=$1 - local compiler_modules=$(get_compiler_modules $compiler) - - module purge - - for mod in $compiler_modules; do - module load $mod - # It is ridiculously hard to check for the success of a loaded - # module. Module does not return error codes and piping to grep - # causes module to run in a subshell. - module list 2>&1 | grep "$mod" +# wait_for_jobs +wait_for_jobs() { + local -i max_jobs=$1 + local -i num_active_jobs=$(jobs | wc -l) + while [ $num_active_jobs -ge $max_jobs ] + do + sleep 1 + num_active_jobs=$(jobs | wc -l) + jobs >& /dev/null done } +# run_in_background +run_in_background() { + local compiler=$1 + + local -i num_jobs=$NUM_JOBS_TO_RUN_IN_PARALLEL + if [[ "$BUILD_ONLY" == True ]]; then + num_jobs=8 + else + if [[ "$compiler" == cuda* ]]; then + num_jobs=1 + fi + fi + wait_for_jobs $num_jobs + + single_build_and_test $* & +} + # build_and_test_all build_and_test_all() { # Get compiler data @@ -262,44 +364,74 @@ build_and_test_all() { local compiler_build_list=$(echo "$CUSTOM_BUILD_LIST" | tr , ' ') fi - # set up env - cd $ROOT_DIR - mkdir -p $compiler - setup_env $compiler - # do builds + local build for build in $compiler_build_list do - single_build_and_test $compiler $build $BUILD_TYPE + run_in_background $compiler $build $BUILD_TYPE # If not cuda, do a hwloc test too if [[ "$compiler" != cuda* ]]; then - 
single_build_and_test $compiler $build "hwloc-$BUILD_TYPE" + run_in_background $compiler $build "hwloc-$BUILD_TYPE" fi done return 0 } +get_test_root_dir() { + local existing_results=$(find . -maxdepth 1 -name "$RESULT_ROOT_PREFIX*" | sort) + local -i num_existing_results=$(echo $existing_results | tr ' ' '\n' | wc -l) + local -i num_to_delete=${num_existing_results}-${NUM_RESULTS_TO_KEEP} + + if [ $num_to_delete -gt 0 ]; then + /bin/rm -rf $(echo $existing_results | tr ' ' '\n' | head -n $num_to_delete) + fi + + echo $(pwd)/${RESULT_ROOT_PREFIX}_$(date +"%Y-%m-%d_%H.%M.%S") +} + +wait_summarize_and_exit() { + wait_for_jobs 1 + + echo "#######################################################" + echo "PASSED TESTS" + echo "#######################################################" + + \ls -1 $PASSED_DIR | sort + + echo "#######################################################" + echo "FAILED TESTS" + echo "#######################################################" + + local failed_test + local -i rv=0 + for failed_test in $(\ls -1 $FAILED_DIR) + do + echo $failed_test "("$(cat $FAILED_DIR/$failed_test)" failed)" + rv=$rv+1 + done + + exit $rv +} + # # Main # -/bin/rm -rf TestAll -mkdir TestAll -cd TestAll +ROOT_DIR=$(get_test_root_dir) +mkdir -p $ROOT_DIR +cd $ROOT_DIR -TEST_RESULTS="" -declare -i NUM_FAILED=0 -ROOT_DIR=$(pwd) +PASSED_DIR=$ROOT_DIR/results/passed +FAILED_DIR=$ROOT_DIR/results/failed +mkdir -p $PASSED_DIR +mkdir -p $FAILED_DIR + +echo "Going to test compilers: " $COMPILERS_TO_TEST for COMPILER in $COMPILERS_TO_TEST; do echo "Testing compiler $COMPILER" build_and_test_all $COMPILER done -echo "#######################################################" -echo "RESULT SUMMARY" -echo "#######################################################" -echo -e $TEST_RESULTS - -exit $NUM_FAILED +wait_summarize_and_exit diff --git a/lib/kokkos/config/testing_scripts/obj_size_opt_check b/lib/kokkos/config/testing_scripts/obj_size_opt_check new file mode 100755 index 
0000000000..47c84d1a92 --- /dev/null +++ b/lib/kokkos/config/testing_scripts/obj_size_opt_check @@ -0,0 +1,287 @@ +#! /usr/bin/env python + +""" +Compute the size at which the current compiler will start to +significantly scale back optimization. + +The CPP file being modified will need the following tags. +// JGF_DUPLICATE_BEGIN - Put before start of function to duplicate +// JGF_DUPLICATE_END - Put after end of function to duplcate +// JGF_DUPE function_name(args); - Put anywhere where it's legal to +put a function call but not in your timing section. + +The program will need to output the string: +FOM: +This will represent the program's performance +""" + +import argparse, sys, os, doctest, subprocess, re, time + +VERBOSE = False + +############################################################################### +def parse_command_line(args, description): +############################################################################### + parser = argparse.ArgumentParser( + usage="""\n%s [--verbose] +OR +%s --help +OR +%s --test + +\033[1mEXAMPLES:\033[0m + > %s foo.cpp 'make -j4' foo +""" % ((os.path.basename(args[0]), ) * 4), + +description=description, + +formatter_class=argparse.ArgumentDefaultsHelpFormatter +) + + parser.add_argument("cppfile", help="Name of file to modify.") + + parser.add_argument("buildcmd", help="Build command") + + parser.add_argument("execmd", help="Run command") + + parser.add_argument("-v", "--verbose", action="store_true", + help="Print extra information") + + parser.add_argument("-s", "--start", type=int, default=1, + help="Starting number of dupes") + + parser.add_argument("-e", "--end", type=int, default=1000, + help="Ending number of dupes") + + parser.add_argument("-n", "--repeat", type=int, default=10, + help="Number of times to repeat an individial execution. 
Best value will be taken.") + + parser.add_argument("-t", "--template", action="store_true", + help="Use templating instead of source copying to increase object size") + + parser.add_argument("-c", "--csv", action="store_true", + help="Print results as CSV") + + args = parser.parse_args(args[1:]) + + if (args.verbose): + global VERBOSE + VERBOSE = True + + return args.cppfile, args.buildcmd, args.execmd, args.start, args.end, args.repeat, args.template, args.csv + +############################################################################### +def verbose_print(msg, override=None): +############################################################################### + if ( (VERBOSE and not override is False) or override): + print msg + +############################################################################### +def error_print(msg): +############################################################################### + print >> sys.stderr, msg + +############################################################################### +def expect(condition, error_msg): +############################################################################### + """ + Similar to assert except doesn't generate an ugly stacktrace. Useful for + checking user error, not programming error. 
+ """ + if (not condition): + raise SystemExit("FAIL: %s" % error_msg) + +############################################################################### +def run_cmd(cmd, ok_to_fail=False, input_str=None, from_dir=None, verbose=None, + arg_stdout=subprocess.PIPE, arg_stderr=subprocess.PIPE): +############################################################################### + verbose_print("RUN: %s" % cmd, verbose) + + if (input_str is not None): + stdin = subprocess.PIPE + else: + stdin = None + + proc = subprocess.Popen(cmd, + shell=True, + stdout=arg_stdout, + stderr=arg_stderr, + stdin=stdin, + cwd=from_dir) + output, errput = proc.communicate(input_str) + output = output.strip() if output is not None else output + stat = proc.wait() + + if (ok_to_fail): + return stat, output, errput + else: + if (arg_stderr is not None): + errput = errput if errput is not None else open(arg_stderr.name, "r").read() + expect(stat == 0, "Command: '%s' failed with error '%s'" % (cmd, errput)) + else: + expect(stat == 0, "Command: '%s' failed. 
See terminal output" % cmd) + return output + +############################################################################### +def build_and_run(source, cppfile, buildcmd, execmd, repeat): +############################################################################### + open(cppfile, 'w').writelines(source) + + run_cmd(buildcmd) + + best = None + for i in xrange(repeat): + wait_for_quiet_machine() + output = run_cmd(execmd) + + current = None + fom_regex = re.compile(r'^FOM: ([0-9.]+)$') + for line in output.splitlines(): + m = fom_regex.match(line) + if (m is not None): + current = float(m.groups()[0]) + break + + expect(current is not None, "No lines in output matched FOM regex") + + if (best is None or best < current): + best = current + + return best + +############################################################################### +def wait_for_quiet_machine(): +############################################################################### + while(True): + time.sleep(2) + + # The first iteration of top gives garbage results + idle_pct_raw = run_cmd("top -bn2 | grep 'Cpu(s)' | tr ',' ' ' | tail -n 1 | awk '{print $5}'") + + idle_pct_re = re.compile(r'^([0-9.]+)%id$') + m = idle_pct_re.match(idle_pct_raw) + + expect(m is not None, "top not returning output in expected form") + + idle_pct = float(m.groups()[0]) + if (idle_pct < 95): + error_print("Machine is too busy, waiting for it to become free") + else: + break + +############################################################################### +def add_n_dupes(curr_lines, num_dupes, template): +############################################################################### + function_name = None + function_invocation = None + function_lines = [] + + function_re = re.compile(r'^.* (\w+) *[(]') + function_inv_re = re.compile(r'^.*JGF_DUPE: +(.+)$') + + # Get function lines + record = False + definition_insertion_point = None + invocation_insertion_point = None + for idx, line in enumerate(curr_lines): + if 
("JGF_DUPLICATE_BEGIN" in line): + record = True + m = function_re.match(curr_lines[idx+1]) + expect(m is not None, "Could not find function in line '%s'" % curr_lines[idx+1]) + function_name = m.groups()[0] + + elif ("JGF_DUPLICATE_END" in line): + record = False + definition_insertion_point = idx + 1 + + elif (record): + function_lines.append(line) + + elif ("JGF_DUPE" in line): + m = function_inv_re.match(line) + expect(m is not None, "Could not find function invocation example in line '%s'" % line) + function_invocation = m.groups()[0] + invocation_insertion_point = idx + 1 + + expect(function_name is not None, "Could not find name of dupe function") + expect(function_invocation is not None, "Could not find function invocation point") + + expect(definition_insertion_point < invocation_insertion_point, "fix me") + + dupe_func_defs = [] + dupe_invocations = ["int jgf_rand = std::rand();\n", "if (false) {}\n"] + + for i in xrange(num_dupes): + if (not template): + dupe_func = list(function_lines) + dupe_func[0] = dupe_func[0].replace(function_name, "%s%d" % (function_name, i)) + dupe_func_defs.extend(dupe_func) + + dupe_invocations.append("else if (jgf_rand == %d) " % i) + if (template): + dupe_call = function_invocation.replace(function_name, "%s<%d>" % (function_name, i)) + "\n" + else: + dupe_call = function_invocation.replace(function_name, "%s%d" % (function_name, i)) + "\n" + dupe_invocations.append(dupe_call) + + curr_lines[invocation_insertion_point:invocation_insertion_point] = dupe_invocations + curr_lines[definition_insertion_point:definition_insertion_point] = dupe_func_defs + +############################################################################### +def report(num_dupes, curr_lines, object_file, orig_fom, curr_fom, csv=False, is_first_report=False): +############################################################################### + fom_change = (curr_fom - orig_fom) / orig_fom + + if (csv): + if (is_first_report): + print "num_dupes, 
obj_byte_size, loc, fom, pct_diff" + + print "%s, %s, %s, %s, %s" % (num_dupes, os.path.getsize(object_file), len(curr_lines), curr_fom, fom_change*100) + else: + print "========================================================" + print "For number of dupes:", num_dupes + print "Object file size (bytes):", os.path.getsize(object_file) + print "Lines of code:", len(curr_lines) + print "Field of merit:", curr_fom + print "Change pct:", fom_change*100 + +############################################################################### +def obj_size_opt_check(cppfile, buildcmd, execmd, start, end, repeat, template, csv=False): +############################################################################### + orig_source_lines = open(cppfile, 'r').readlines() + + backup_file = "%s.orig" % cppfile + object_file = "%s.o" % os.path.splitext(cppfile)[0] + os.rename(cppfile, backup_file) + + orig_fom = build_and_run(orig_source_lines, cppfile, buildcmd, execmd, repeat) + report(0, orig_source_lines, object_file, orig_fom, orig_fom, csv=csv, is_first_report=True) + + i = start + while (i < end): + curr_lines = list(orig_source_lines) + add_n_dupes(curr_lines, i, template) + + curr_fom = build_and_run(curr_lines, cppfile, buildcmd, execmd, repeat) + + report(i, curr_lines, object_file, orig_fom, curr_fom, csv=csv) + + i *= 2 # make growth function configurable? 
+ + os.remove(cppfile) + os.rename(backup_file, cppfile) + +############################################################################### +def _main_func(description): +############################################################################### + if ("--test" in sys.argv): + test_results = doctest.testmod(verbose=True) + sys.exit(1 if test_results.failed > 0 else 0) + + cppfile, buildcmd, execmd, start, end, repeat, template, csv = parse_command_line(sys.argv, description) + + obj_size_opt_check(cppfile, buildcmd, execmd, start, end, repeat, template, csv) + +############################################################################### +if (__name__ == "__main__"): + _main_func(__doc__) diff --git a/lib/kokkos/containers/CMakeLists.txt b/lib/kokkos/containers/CMakeLists.txt new file mode 100644 index 0000000000..894935fa01 --- /dev/null +++ b/lib/kokkos/containers/CMakeLists.txt @@ -0,0 +1,10 @@ + + +TRIBITS_SUBPACKAGE(Containers) + +ADD_SUBDIRECTORY(src) + +TRIBITS_ADD_TEST_DIRECTORIES(unit_tests) +TRIBITS_ADD_TEST_DIRECTORIES(performance_tests) + +TRIBITS_SUBPACKAGE_POSTPROCESS() diff --git a/lib/kokkos/containers/cmake/Dependencies.cmake b/lib/kokkos/containers/cmake/Dependencies.cmake new file mode 100644 index 0000000000..1d71d8af34 --- /dev/null +++ b/lib/kokkos/containers/cmake/Dependencies.cmake @@ -0,0 +1,5 @@ +TRIBITS_PACKAGE_DEFINE_DEPENDENCIES( + LIB_REQUIRED_PACKAGES KokkosCore + LIB_OPTIONAL_TPLS Pthread CUDA HWLOC + TEST_OPTIONAL_TPLS CUSPARSE + ) diff --git a/lib/kokkos/containers/cmake/KokkosContainers_config.h.in b/lib/kokkos/containers/cmake/KokkosContainers_config.h.in new file mode 100644 index 0000000000..d91fdda1e3 --- /dev/null +++ b/lib/kokkos/containers/cmake/KokkosContainers_config.h.in @@ -0,0 +1,4 @@ +#ifndef KOKKOS_CONTAINERS_CONFIG_H +#define KOKKOS_CONTAINERS_CONFIG_H + +#endif diff --git a/lib/kokkos/containers/performance_tests/CMakeLists.txt b/lib/kokkos/containers/performance_tests/CMakeLists.txt new file mode 100644 
index 0000000000..6b57802935 --- /dev/null +++ b/lib/kokkos/containers/performance_tests/CMakeLists.txt @@ -0,0 +1,26 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../src ) + +SET(SOURCES + TestMain.cpp + TestCuda.cpp + ) + +IF(Kokkos_ENABLE_Pthread) + LIST( APPEND SOURCES TestThreads.cpp) +ENDIF() + +IF(Kokkos_ENABLE_OpenMP) + LIST( APPEND SOURCES TestOpenMP.cpp) +ENDIF() + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + PerformanceTest + SOURCES ${SOURCES} + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) diff --git a/lib/kokkos/containers/performance_tests/Makefile b/lib/kokkos/containers/performance_tests/Makefile index 7ced945282..e7abaf44ce 100644 --- a/lib/kokkos/containers/performance_tests/Makefile +++ b/lib/kokkos/containers/performance_tests/Makefile @@ -6,12 +6,12 @@ vpath %.cpp ${KOKKOS_PATH}/containers/performance_tests default: build_all echo "End Build" - + include $(KOKKOS_PATH)/Makefile.kokkos ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) - CXX = nvcc_wrapper + CXX = $(NVCC_WRAPPER) CXXFLAGS ?= -O3 LINK = $(CXX) LDFLAGS ?= -lpthread @@ -50,7 +50,7 @@ KokkosContainers_PerformanceTest_Cuda: $(OBJ_CUDA) $(KOKKOS_LINK_DEPENDS) KokkosContainers_PerformanceTest_Threads: $(OBJ_THREADS) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_THREADS) $(KOKKOS_LIBS) $(LIB) -o KokkosContainers_PerformanceTest_Threads - + KokkosContainers_PerformanceTest_OpenMP: $(OBJ_OPENMP) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_OPENMP) $(KOKKOS_LIBS) $(LIB) -o KokkosContainers_PerformanceTest_OpenMP @@ -63,11 +63,11 @@ test-threads: KokkosContainers_PerformanceTest_Threads test-openmp: KokkosContainers_PerformanceTest_OpenMP ./KokkosContainers_PerformanceTest_OpenMP - + build_all: $(TARGETS) test: $(TEST_TARGETS) - + clean: kokkos-clean rm -f *.o $(TARGETS) diff --git 
a/lib/kokkos/containers/src/CMakeLists.txt b/lib/kokkos/containers/src/CMakeLists.txt new file mode 100644 index 0000000000..da5a791530 --- /dev/null +++ b/lib/kokkos/containers/src/CMakeLists.txt @@ -0,0 +1,31 @@ + +TRIBITS_CONFIGURE_FILE(${PACKAGE_NAME}_config.h) + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +#----------------------------------------------------------------------------- + +SET(HEADERS "") +SET(SOURCES "") + +SET(HEADERS_IMPL "") + +FILE(GLOB HEADERS *.hpp) +FILE(GLOB HEADERS_IMPL impl/*.hpp) +FILE(GLOB SOURCES impl/*.cpp) + +SET(TRILINOS_INCDIR ${CMAKE_INSTALL_PREFIX}/${${PROJECT_NAME}_INSTALL_INCLUDE_DIR}) + +INSTALL(FILES ${HEADERS_IMPL} DESTINATION ${TRILINOS_INCDIR}/impl/) + +TRIBITS_ADD_LIBRARY( + kokkoscontainers + HEADERS ${HEADERS} + NOINSTALLHEADERS ${HEADERS_IMPL} + SOURCES ${SOURCES} + DEPLIBS + ) + +#----------------------------------------------------------------------------- + diff --git a/lib/kokkos/containers/src/Kokkos_Bitset.hpp b/lib/kokkos/containers/src/Kokkos_Bitset.hpp index b51b1c2b26..74da5f61b5 100644 --- a/lib/kokkos/containers/src/Kokkos_Bitset.hpp +++ b/lib/kokkos/containers/src/Kokkos_Bitset.hpp @@ -90,7 +90,7 @@ public: private: enum { block_size = static_cast(sizeof(unsigned)*CHAR_BIT) }; enum { block_mask = block_size-1u }; - enum { block_shift = static_cast(Impl::power_of_two::value) }; + enum { block_shift = Kokkos::Impl::integral_power_of_two(block_size) }; public: @@ -322,7 +322,7 @@ public: private: enum { block_size = static_cast(sizeof(unsigned)*CHAR_BIT) }; enum { block_mask = block_size -1u }; - enum { block_shift = static_cast(Impl::power_of_two::value) }; + enum { block_shift = Kokkos::Impl::integral_power_of_two(block_size) }; public: ConstBitset() diff --git a/lib/kokkos/containers/src/Kokkos_DualView.hpp b/lib/kokkos/containers/src/Kokkos_DualView.hpp index 5e70731bdc..68d033641b 100644 --- a/lib/kokkos/containers/src/Kokkos_DualView.hpp +++ 
b/lib/kokkos/containers/src/Kokkos_DualView.hpp @@ -106,9 +106,9 @@ public: //! The type of a Kokkos::View on the device. typedef View< typename traits::data_type , - typename traits::array_layout , - typename traits::device_type , - typename traits::memory_traits > t_dev ; + Arg1Type , + Arg2Type , + Arg3Type > t_dev ; /// \typedef t_host /// \brief The type of a Kokkos::View host mirror of \c t_dev. @@ -117,9 +117,9 @@ public: //! The type of a const View on the device. //! The type of a Kokkos::View on the device. typedef View< typename traits::const_data_type , - typename traits::array_layout , - typename traits::device_type , - typename traits::memory_traits > t_dev_const ; + Arg1Type , + Arg2Type , + Arg3Type > t_dev_const ; /// \typedef t_host_const /// \brief The type of a const View host mirror of \c t_dev_const. @@ -221,6 +221,19 @@ public: modified_host (src.modified_host) {} + //! Subview constructor + template< class SD, class S1 , class S2 , class S3 + , class Arg0 , class ... Args > + DualView( const DualView & src + , const Arg0 & arg0 + , Args ... args + ) + : d_view( Kokkos::subview( src.d_view , arg0 , args ... ) ) + , h_view( Kokkos::subview( src.h_view , arg0 , args ... ) ) + , modified_device (src.modified_device) + , modified_host (src.modified_host) + {} + /// \brief Create DualView from existing device and host View objects. /// /// This constructor assumes that the device and host View objects @@ -237,7 +250,30 @@ public: modified_device (View ("DualView::modified_device")), modified_host (View ("DualView::modified_host")) { +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) Impl::assert_shapes_are_equal (d_view.shape (), h_view.shape ()); +#else + if ( d_view.rank != h_view.rank || + d_view.dimension_0() != h_view.dimension_0() || + d_view.dimension_1() != h_view.dimension_1() || + d_view.dimension_2() != h_view.dimension_2() || + d_view.dimension_3() != h_view.dimension_3() || + d_view.dimension_4() != h_view.dimension_4() || + d_view.dimension_5() != h_view.dimension_5() || + d_view.dimension_6() != h_view.dimension_6() || + d_view.dimension_7() != h_view.dimension_7() || + d_view.stride_0() != h_view.stride_0() || + d_view.stride_1() != h_view.stride_1() || + d_view.stride_2() != h_view.stride_2() || + d_view.stride_3() != h_view.stride_3() || + d_view.stride_4() != h_view.stride_4() || + d_view.stride_5() != h_view.stride_5() || + d_view.stride_6() != h_view.stride_6() || + d_view.stride_7() != h_view.stride_7() || + d_view.span() != h_view.span() ) { + Kokkos::Impl::throw_runtime_exception("DualView constructed with incompatible views"); + } +#endif } //@} @@ -501,6 +537,52 @@ public: }; } // namespace Kokkos + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- +// +// Partial specializations of Kokkos::subview() for DualView objects. +// + +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + +namespace Kokkos { +namespace Impl { + +template< class D, class A1, class A2, class A3, class ... Args > +struct DualViewSubview { + + typedef typename Kokkos::Experimental::Impl::ViewMapping + < void + , Kokkos::ViewTraits< D, A1, A2, A3 > + , Args ... + >::traits_type dst_traits ; + + typedef Kokkos::DualView + < typename dst_traits::data_type + , typename dst_traits::array_layout + , typename dst_traits::device_type + , typename dst_traits::memory_traits + > type ; +}; + +} /* namespace Impl */ + + +template< class D , class A1 , class A2 , class A3 , class ... 
Args > +typename Impl::DualViewSubview::type +subview( const DualView & src , Args ... args ) +{ + return typename + Impl::DualViewSubview::type( src , args ... ); +} + +} /* namespace Kokkos */ + +#else + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- // // Partial specializations of Kokkos::subview() for DualView objects. // @@ -839,6 +921,15 @@ subview( const DualView & src , return sub_view; } +} // namespace Kokkos + +#endif /* defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { + // // Partial specialization of Kokkos::deep_copy() for DualView objects. // diff --git a/lib/kokkos/containers/src/Kokkos_Vector.hpp b/lib/kokkos/containers/src/Kokkos_Vector.hpp index db54b0c350..6a360e8d19 100644 --- a/lib/kokkos/containers/src/Kokkos_Vector.hpp +++ b/lib/kokkos/containers/src/Kokkos_Vector.hpp @@ -53,12 +53,8 @@ */ namespace Kokkos { -template -class vector : public DualView { -public: - typedef typename Space::memory_space memory_space; - typedef typename Space::execution_space execution_space; - typedef typename Kokkos::Device device_type; +template< class Scalar, class Arg1Type = void> +class vector : public DualView { typedef Scalar value_type; typedef Scalar* pointer; @@ -72,7 +68,7 @@ private: size_t _size; typedef size_t size_type; float _extra_storage; - typedef DualView DV; + typedef DualView DV; public: @@ -93,7 +89,7 @@ public: }; - vector(int n, Scalar val=Scalar()):DualView("Vector",size_t(n*(1.1))) { + vector(int n, Scalar val=Scalar()):DualView("Vector",size_t(n*(1.1))) { _size = n; _extra_storage = 1.1; DV::modified_host() = 1; diff --git a/lib/kokkos/containers/unit_tests/CMakeLists.txt b/lib/kokkos/containers/unit_tests/CMakeLists.txt new file mode 100644 index 
0000000000..7fff0f835b --- /dev/null +++ b/lib/kokkos/containers/unit_tests/CMakeLists.txt @@ -0,0 +1,40 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../src ) + +SET(SOURCES + UnitTestMain.cpp + TestCuda.cpp + ) + +SET(LIBRARIES kokkoscore) + +IF(Kokkos_ENABLE_Pthread) + LIST( APPEND SOURCES + TestThreads.cpp + ) +ENDIF() + +IF(Kokkos_ENABLE_Serial) + LIST( APPEND SOURCES + TestSerial.cpp + ) +ENDIF() + +IF(Kokkos_ENABLE_OpenMP) + LIST( APPEND SOURCES + TestOpenMP.cpp + ) +ENDIF() + + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest + SOURCES ${SOURCES} + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) + diff --git a/lib/kokkos/containers/unit_tests/Makefile b/lib/kokkos/containers/unit_tests/Makefile index 176bfa906e..48e3ff61d0 100644 --- a/lib/kokkos/containers/unit_tests/Makefile +++ b/lib/kokkos/containers/unit_tests/Makefile @@ -6,12 +6,12 @@ vpath %.cpp ${KOKKOS_PATH}/containers/unit_tests default: build_all echo "End Build" - + include $(KOKKOS_PATH)/Makefile.kokkos ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) - CXX = nvcc_wrapper + CXX = $(NVCC_WRAPPER) CXXFLAGS ?= -O3 LINK = $(CXX) LDFLAGS ?= -lpthread @@ -56,7 +56,7 @@ KokkosContainers_UnitTest_Cuda: $(OBJ_CUDA) $(KOKKOS_LINK_DEPENDS) KokkosContainers_UnitTest_Threads: $(OBJ_THREADS) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_THREADS) $(KOKKOS_LIBS) $(LIB) -o KokkosContainers_UnitTest_Threads - + KokkosContainers_UnitTest_OpenMP: $(OBJ_OPENMP) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_OPENMP) $(KOKKOS_LIBS) $(LIB) -o KokkosContainers_UnitTest_OpenMP @@ -74,11 +74,11 @@ test-openmp: KokkosContainers_UnitTest_OpenMP test-serial: KokkosContainers_UnitTest_Serial ./KokkosContainers_UnitTest_Serial - + build_all: $(TARGETS) test: $(TEST_TARGETS) - + clean: kokkos-clean rm -f *.o 
$(TARGETS) diff --git a/lib/kokkos/containers/unit_tests/TestComplex.hpp b/lib/kokkos/containers/unit_tests/TestComplex.hpp index 5065d72579..94c04b61f4 100644 --- a/lib/kokkos/containers/unit_tests/TestComplex.hpp +++ b/lib/kokkos/containers/unit_tests/TestComplex.hpp @@ -1,12 +1,12 @@ //@HEADER // ************************************************************************ -// +// // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation -// +// // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. -// +// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -35,7 +35,7 @@ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) -// +// // ************************************************************************ //@HEADER @@ -43,7 +43,7 @@ #ifndef KOKKOS_TEST_COMPLEX_HPP #define KOKKOS_TEST_COMPLEX_HPP -//#include +#include #include #include @@ -124,14 +124,13 @@ namespace Impl { complex_type z1 (1.0, -1.0); complex_type z2 (-1.0, 1.0); - complex_type z3 = z1 - z2; - ASSERT_TRUE( z3 == complex_type (2.0, -2.0) ); + complex_type z3 = z1 * z2; + ASSERT_TRUE( z3 == complex_type (0.0, 2.0) ); - // Test unary minus. - complex_type z4 (3.0, -4.0); - ASSERT_TRUE( z4 == complex_type (3.0, -4.0) ); - ASSERT_TRUE( -z4 == complex_type (-3.0, 4.0) ); - ASSERT_TRUE( z4 == -complex_type (-3.0, 4.0) ); + // Make sure that std::complex * Kokkos::complex works too. 
+ std::complex z4 (-1.0, 1.0); + complex_type z5 = z4 * z1; + ASSERT_TRUE( z5 == complex_type (0.0, 2.0) ); } template @@ -208,7 +207,7 @@ namespace Impl { typedef Kokkos::View*, Device> view_type; typedef typename view_type::size_type size_type; - typedef Kokkos::complex value_type; + typedef Kokkos::complex value_type; KOKKOS_INLINE_FUNCTION void operator () (const size_type i, Kokkos::complex& sum) const { diff --git a/lib/kokkos/core/CMakeLists.txt b/lib/kokkos/core/CMakeLists.txt new file mode 100644 index 0000000000..42fce6b2f2 --- /dev/null +++ b/lib/kokkos/core/CMakeLists.txt @@ -0,0 +1,11 @@ + + +TRIBITS_SUBPACKAGE(Core) + +ADD_SUBDIRECTORY(src) + +TRIBITS_ADD_TEST_DIRECTORIES(unit_test) +TRIBITS_ADD_TEST_DIRECTORIES(perf_test) + +TRIBITS_SUBPACKAGE_POSTPROCESS() + diff --git a/lib/kokkos/core/cmake/Dependencies.cmake b/lib/kokkos/core/cmake/Dependencies.cmake new file mode 100644 index 0000000000..13ade23a9c --- /dev/null +++ b/lib/kokkos/core/cmake/Dependencies.cmake @@ -0,0 +1,4 @@ +TRIBITS_PACKAGE_DEFINE_DEPENDENCIES( + LIB_OPTIONAL_TPLS Pthread CUDA HWLOC QTHREAD + TEST_OPTIONAL_TPLS CUSPARSE + ) diff --git a/lib/kokkos/core/cmake/KokkosCore_config.h.in b/lib/kokkos/core/cmake/KokkosCore_config.h.in new file mode 100644 index 0000000000..d381c59a2d --- /dev/null +++ b/lib/kokkos/core/cmake/KokkosCore_config.h.in @@ -0,0 +1,50 @@ +#ifndef KOKKOS_CORE_CONFIG_H +#define KOKKOS_CORE_CONFIG_H + +/* The trivial 'src/build_common.sh' creates a config + * that must stay in sync with this file. + */ +#cmakedefine KOKKOS_FOR_SIERRA + +#if !defined( KOKKOS_FOR_SIERRA ) + +#cmakedefine KOKKOS_HAVE_MPI +#cmakedefine KOKKOS_HAVE_CUDA + +// mfh 16 Sep 2014: If passed in on the command line, that overrides +// any value of KOKKOS_USE_CUDA_UVM here. 
Doing this should prevent build +// warnings like this one: +// +// packages/kokkos/core/src/KokkosCore_config.h:13:1: warning: "KOKKOS_USE_CUDA_UVM" redefined +// +// At some point, we should edit the test-build scripts in +// Trilinos/cmake/ctest/drivers/perseus/, and take +// -DKOKKOS_USE_CUDA_UVM from the command-line arguments there. I +// hesitate to do that now, because I'm not sure if all the files are +// including KokkosCore_config.h (or a header file that includes it) like +// they should. + +#if ! defined(KOKKOS_USE_CUDA_UVM) +#cmakedefine KOKKOS_USE_CUDA_UVM +#endif // ! defined(KOKKOS_USE_CUDA_UVM) + +#cmakedefine KOKKOS_HAVE_PTHREAD +#cmakedefine KOKKOS_HAVE_SERIAL +#cmakedefine KOKKOS_HAVE_QTHREAD +#cmakedefine KOKKOS_HAVE_Winthread +#cmakedefine KOKKOS_HAVE_OPENMP +#cmakedefine KOKKOS_HAVE_HWLOC +#cmakedefine KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK +#cmakedefine KOKKOS_HAVE_CXX11 +#cmakedefine KOKKOS_HAVE_CUSPARSE +#cmakedefine KOKKOS_ENABLE_PROFILING_COLLECT_KERNEL_DATA +#cmakedefine KOKKOS_ENABLE_PROFILING_AGGREGATE_MPI + +// Don't forbid users from defining this macro on the command line, +// but still make sure that CMake logic can control its definition. +#if ! 
defined(KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA) +#cmakedefine KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA 1 +#endif // KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA + +#endif // KOKKOS_FOR_SIERRA +#endif // KOKKOS_CORE_CONFIG_H diff --git a/lib/kokkos/core/perf_test/CMakeLists.txt b/lib/kokkos/core/perf_test/CMakeLists.txt new file mode 100644 index 0000000000..34aa81e92c --- /dev/null +++ b/lib/kokkos/core/perf_test/CMakeLists.txt @@ -0,0 +1,18 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINRARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES + PerfTestMain.cpp + PerfTestHost.cpp + PerfTestCuda.cpp + ) + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + PerfTest + SOURCES ${SOURCES} + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) diff --git a/lib/kokkos/core/perf_test/Makefile b/lib/kokkos/core/perf_test/Makefile index 2bf189a22f..8fa1fbfc3c 100644 --- a/lib/kokkos/core/perf_test/Makefile +++ b/lib/kokkos/core/perf_test/Makefile @@ -1,17 +1,17 @@ KOKKOS_PATH = ../.. 
-GTEST_PATH = ../../TPL/gtest +GTEST_PATH = ../../tpls/gtest vpath %.cpp ${KOKKOS_PATH}/core/perf_test default: build_all echo "End Build" - + include $(KOKKOS_PATH)/Makefile.kokkos ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) - CXX = nvcc_wrapper + CXX = $(NVCC_WRAPPER) CXXFLAGS ?= -O3 LINK = $(CXX) LDFLAGS ?= -lpthread @@ -47,12 +47,12 @@ test-performance: KokkosCore_PerformanceTest test-atomic: KokkosCore_PerformanceTest_Atomics ./KokkosCore_PerformanceTest_Atomics - + build_all: $(TARGETS) test: $(TEST_TARGETS) - + clean: kokkos-clean rm -f *.o $(TARGETS) diff --git a/lib/kokkos/core/perf_test/PerfTestCuda.cpp b/lib/kokkos/core/perf_test/PerfTestCuda.cpp index 1263a7672a..4a4bc13cd4 100644 --- a/lib/kokkos/core/perf_test/PerfTestCuda.cpp +++ b/lib/kokkos/core/perf_test/PerfTestCuda.cpp @@ -174,7 +174,7 @@ struct TextureFetch TEST_F( cuda, texture_double ) { printf("Random reduce of double through texture fetch\n"); - for (int i=1; i<=27; ++i) { + for (int i=1; i<=26; ++i) { int size = 1< +struct ViewOperatorBoundsErrorAbort< Kokkos::CudaSpace > { + KOKKOS_INLINE_FUNCTION + static void apply( const size_t rank + , const size_t n0 , const size_t n1 + , const size_t n2 , const size_t n3 + , const size_t n4 , const size_t n5 + , const size_t n6 , const size_t n7 + , const size_t i0 , const size_t i1 + , const size_t i2 , const size_t i3 + , const size_t i4 , const size_t i5 + , const size_t i6 , const size_t i7 ) + { + const int r = + ( n0 <= i0 ? 0 : + ( n1 <= i1 ? 1 : + ( n2 <= i2 ? 2 : + ( n3 <= i3 ? 3 : + ( n4 <= i4 ? 4 : + ( n5 <= i5 ? 5 : + ( n6 <= i6 ? 6 : 7 ))))))); + const size_t n = + ( n0 <= i0 ? n0 : + ( n1 <= i1 ? n1 : + ( n2 <= i2 ? n2 : + ( n3 <= i3 ? n3 : + ( n4 <= i4 ? n4 : + ( n5 <= i5 ? n5 : + ( n6 <= i6 ? n6 : n7 ))))))); + const size_t i = + ( n0 <= i0 ? i0 : + ( n1 <= i1 ? i1 : + ( n2 <= i2 ? i2 : + ( n3 <= i3 ? i3 : + ( n4 <= i4 ? i4 : + ( n5 <= i5 ? i5 : + ( n6 <= i6 ? 
i6 : i7 ))))))); + printf("Cuda view array bounds error index %d : FAILED %lu < %lu\n" , r , i , n ); + Kokkos::Impl::cuda_abort("Cuda view array bounds error"); + } +}; + +} // namespace Impl +} // namespace Experimental +} // namespace Kokkos + //---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { +namespace Experimental { +namespace Impl { + // Cuda Texture fetches can be performed for 4, 8 and 16 byte objects (int,int2,int4) // Via reinterpret_case this can be used to support all scalar types of those sizes. // Any other scalar type falls back to either normal reads out of global memory, @@ -130,7 +182,6 @@ struct CudaTextureFetch { CudaTextureFetch( const ValueType * const arg_ptr , Kokkos::Experimental::Impl::SharedAllocationRecord< CudaMemorySpace , void > & record ) - // 'attach_texture_object' returns 0 when __CUDA_ARCH__ < 300 : m_obj( record.template attach_texture_object< AliasType >() ) , m_ptr( arg_ptr ) , m_offset( record.attach_texture_object_offset( reinterpret_cast( arg_ptr ) ) ) diff --git a/lib/kokkos/core/src/Cuda/Kokkos_CudaExec.hpp b/lib/kokkos/core/src/Cuda/Kokkos_CudaExec.hpp index c1b2d51c47..ca03990162 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_CudaExec.hpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_CudaExec.hpp @@ -208,9 +208,9 @@ struct CudaParallelLaunch< DriverType , true > { Kokkos::Impl::throw_runtime_exception( std::string("CudaParallelLaunch FAILED: shared memory request is too large") ); } else if ( shmem ) { - cudaFuncSetCacheConfig( cuda_parallel_launch_constant_memory< DriverType > , cudaFuncCachePreferShared ); + CUDA_SAFE_CALL( cudaFuncSetCacheConfig( cuda_parallel_launch_constant_memory< DriverType > , cudaFuncCachePreferShared ) ); } else { - cudaFuncSetCacheConfig( cuda_parallel_launch_constant_memory< DriverType > , cudaFuncCachePreferL1 ); + CUDA_SAFE_CALL( cudaFuncSetCacheConfig( 
cuda_parallel_launch_constant_memory< DriverType > , cudaFuncCachePreferL1 ) ); } // Copy functor to constant memory on the device @@ -246,9 +246,9 @@ struct CudaParallelLaunch< DriverType , false > { Kokkos::Impl::throw_runtime_exception( std::string("CudaParallelLaunch FAILED: shared memory request is too large") ); } else if ( shmem ) { - cudaFuncSetCacheConfig( cuda_parallel_launch_local_memory< DriverType > , cudaFuncCachePreferShared ); + CUDA_SAFE_CALL( cudaFuncSetCacheConfig( cuda_parallel_launch_local_memory< DriverType > , cudaFuncCachePreferShared ) ); } else { - cudaFuncSetCacheConfig( cuda_parallel_launch_local_memory< DriverType > , cudaFuncCachePreferL1 ); + CUDA_SAFE_CALL( cudaFuncSetCacheConfig( cuda_parallel_launch_local_memory< DriverType > , cudaFuncCachePreferL1 ) ); } int* lock_array_ptr = lock_array_cuda_space_ptr(); diff --git a/lib/kokkos/core/src/Cuda/Kokkos_CudaSpace.cpp b/lib/kokkos/core/src/Cuda/Kokkos_CudaSpace.cpp index 13316cb63e..829ad03a48 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_CudaSpace.cpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_CudaSpace.cpp @@ -45,6 +45,7 @@ #include #include #include +#include #include /* only compile this file if CUDA is enabled for Kokkos */ @@ -106,6 +107,8 @@ void DeepCopyAsyncCuda( void * dst , const void * src , size_t n) { namespace Kokkos { +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + namespace { void texture_object_attach_impl( Impl::AllocationTracker const & tracker @@ -164,6 +167,8 @@ void CudaSpace::texture_object_attach( Impl::AllocationTracker const & tracker texture_object_attach_impl( tracker, type_size, desc ); } +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + void CudaSpace::access_error() { const std::string msg("Kokkos::CudaSpace::access_error attempt to execute Cuda function from non-Cuda space" ); @@ -178,6 +183,8 @@ void CudaSpace::access_error( const void * const ) /*--------------------------------------------------------------------------*/ +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + Impl::AllocationTracker CudaUVMSpace::allocate_and_track( const std::string & label, const size_t size ) { return Impl::AllocationTracker( allocator(), size, label); @@ -191,6 +198,8 @@ void CudaUVMSpace::texture_object_attach( Impl::AllocationTracker const & track texture_object_attach_impl( tracker, type_size, desc ); } +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + bool CudaUVMSpace::available() { #if defined( CUDA_VERSION ) && ( 6000 <= CUDA_VERSION ) && !defined(__APPLE__) @@ -203,11 +212,15 @@ bool CudaUVMSpace::available() /*--------------------------------------------------------------------------*/ +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + Impl::AllocationTracker CudaHostPinnedSpace::allocate_and_track( const std::string & label, const size_t size ) { return Impl::AllocationTracker( allocator(), size, label); } +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + } // namespace Kokkos /*--------------------------------------------------------------------------*/ @@ -301,8 +314,18 @@ attach_texture_object( const unsigned sizeof_alias , void * const alloc_ptr , size_t const alloc_size ) { - // Only valid for 300 <= __CUDA_ARCH__ - // otherwise return zero. 
+ enum { TEXTURE_BOUND_1D = 1u << 27 }; + + if ( ( alloc_ptr == 0 ) || ( sizeof_alias * TEXTURE_BOUND_1D <= alloc_size ) ) { + std::ostringstream msg ; + msg << "Kokkos::CudaSpace ERROR: Cannot attach texture object to" + << " alloc_ptr(" << alloc_ptr << ")" + << " alloc_size(" << alloc_size << ")" + << " max_size(" << ( sizeof_alias * TEXTURE_BOUND_1D ) << ")" ; + std::cerr << msg.str() << std::endl ; + std::cerr.flush(); + Kokkos::Impl::throw_runtime_exception( msg.str() ); + } ::cudaTextureObject_t tex_obj ; @@ -505,6 +528,133 @@ SharedAllocationRecord( const Kokkos::CudaHostPinnedSpace & arg_space ); } +//---------------------------------------------------------------------------- + +void * SharedAllocationRecord< Kokkos::CudaSpace , void >:: +allocate_tracked( const Kokkos::CudaSpace & arg_space + , const std::string & arg_alloc_label + , const size_t arg_alloc_size ) +{ + if ( ! arg_alloc_size ) return (void *) 0 ; + + SharedAllocationRecord * const r = + allocate( arg_space , arg_alloc_label , arg_alloc_size ); + + RecordBase::increment( r ); + + return r->data(); +} + +void SharedAllocationRecord< Kokkos::CudaSpace , void >:: +deallocate_tracked( void * const arg_alloc_ptr ) +{ + if ( arg_alloc_ptr != 0 ) { + SharedAllocationRecord * const r = get_record( arg_alloc_ptr ); + + RecordBase::decrement( r ); + } +} + +void * SharedAllocationRecord< Kokkos::CudaSpace , void >:: +reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ) +{ + SharedAllocationRecord * const r_old = get_record( arg_alloc_ptr ); + SharedAllocationRecord * const r_new = allocate( r_old->m_space , r_old->get_label() , arg_alloc_size ); + + Kokkos::Impl::DeepCopy( r_new->data() , r_old->data() + , std::min( r_old->size() , r_new->size() ) ); + + RecordBase::increment( r_new ); + RecordBase::decrement( r_old ); + + return r_new->data(); +} + +void * SharedAllocationRecord< Kokkos::CudaUVMSpace , void >:: +allocate_tracked( const Kokkos::CudaUVMSpace & arg_space + , 
const std::string & arg_alloc_label + , const size_t arg_alloc_size ) +{ + if ( ! arg_alloc_size ) return (void *) 0 ; + + SharedAllocationRecord * const r = + allocate( arg_space , arg_alloc_label , arg_alloc_size ); + + RecordBase::increment( r ); + + return r->data(); +} + +void SharedAllocationRecord< Kokkos::CudaUVMSpace , void >:: +deallocate_tracked( void * const arg_alloc_ptr ) +{ + if ( arg_alloc_ptr != 0 ) { + SharedAllocationRecord * const r = get_record( arg_alloc_ptr ); + + RecordBase::decrement( r ); + } +} + +void * SharedAllocationRecord< Kokkos::CudaUVMSpace , void >:: +reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ) +{ + SharedAllocationRecord * const r_old = get_record( arg_alloc_ptr ); + SharedAllocationRecord * const r_new = allocate( r_old->m_space , r_old->get_label() , arg_alloc_size ); + + Kokkos::Impl::DeepCopy( r_new->data() , r_old->data() + , std::min( r_old->size() , r_new->size() ) ); + + RecordBase::increment( r_new ); + RecordBase::decrement( r_old ); + + return r_new->data(); +} + +void * SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void >:: +allocate_tracked( const Kokkos::CudaHostPinnedSpace & arg_space + , const std::string & arg_alloc_label + , const size_t arg_alloc_size ) +{ + if ( ! 
arg_alloc_size ) return (void *) 0 ; + + SharedAllocationRecord * const r = + allocate( arg_space , arg_alloc_label , arg_alloc_size ); + + RecordBase::increment( r ); + + return r->data(); +} + +void SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void >:: +deallocate_tracked( void * const arg_alloc_ptr ) +{ + if ( arg_alloc_ptr != 0 ) { + SharedAllocationRecord * const r = get_record( arg_alloc_ptr ); + + RecordBase::decrement( r ); + } +} + +void * SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void >:: +reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ) +{ + SharedAllocationRecord * const r_old = get_record( arg_alloc_ptr ); + SharedAllocationRecord * const r_new = allocate( r_old->m_space , r_old->get_label() , arg_alloc_size ); + + Kokkos::Impl::DeepCopy( r_new->data() , r_old->data() + , std::min( r_old->size() , r_new->size() ) ); + + RecordBase::increment( r_new ); + RecordBase::decrement( r_old ); + + return r_new->data(); +} + +//---------------------------------------------------------------------------- + SharedAllocationRecord< Kokkos::CudaSpace , void > * SharedAllocationRecord< Kokkos::CudaSpace , void >::get_record( void * alloc_ptr ) { @@ -514,15 +664,17 @@ SharedAllocationRecord< Kokkos::CudaSpace , void >::get_record( void * alloc_ptr #if 0 // Copy the header from the allocation - SharedAllocationHeader head ; + Header head ; - SharedAllocationHeader const * const head_cuda = Header::get_header( alloc_ptr ); + Header const * const head_cuda = alloc_ptr ? Header::get_header( alloc_ptr ) : (Header*) 0 ; - Kokkos::Impl::DeepCopy::DeepCopy( & head , head_cuda , sizeof(SharedAllocationHeader) ); + if ( alloc_ptr ) { + Kokkos::Impl::DeepCopy::DeepCopy( & head , head_cuda , sizeof(SharedAllocationHeader) ); + } - RecordCuda * const record = static_cast< RecordCuda * >( head.m_record ); + RecordCuda * const record = alloc_ptr ? 
static_cast< RecordCuda * >( head.m_record ) : (RecordCuda *) 0 ; - if ( record->m_alloc_ptr != head_cuda ) { + if ( ! alloc_ptr || record->m_alloc_ptr != head_cuda ) { Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::CudaSpace , void >::get_record ERROR" ) ); } @@ -548,9 +700,9 @@ SharedAllocationRecord< Kokkos::CudaUVMSpace , void >::get_record( void * alloc_ using Header = SharedAllocationHeader ; using RecordCuda = SharedAllocationRecord< Kokkos::CudaUVMSpace , void > ; - Header * const h = reinterpret_cast< Header * >( alloc_ptr ) - 1 ; + Header * const h = alloc_ptr ? reinterpret_cast< Header * >( alloc_ptr ) - 1 : (Header *) 0 ; - if ( h->m_record->m_alloc_ptr != h ) { + if ( ! alloc_ptr || h->m_record->m_alloc_ptr != h ) { Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::CudaUVMSpace , void >::get_record ERROR" ) ); } @@ -563,9 +715,9 @@ SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void >::get_record( void * using Header = SharedAllocationHeader ; using RecordCuda = SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void > ; - Header * const h = reinterpret_cast< Header * >( alloc_ptr ) - 1 ; + Header * const h = alloc_ptr ? reinterpret_cast< Header * >( alloc_ptr ) - 1 : (Header *) 0 ; - if ( h->m_record->m_alloc_ptr != h ) { + if ( ! 
alloc_ptr || h->m_record->m_alloc_ptr != h ) { Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void >::get_record ERROR" ) ); } @@ -592,14 +744,25 @@ print_records( std::ostream & s , const Kokkos::CudaSpace & space , bool detail head.m_label[0] = 0 ; } - snprintf( buffer , 256 , "Cuda addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx + %.8ld ] count(%d) dealloc(0x%.12lx) %s\n" - , reinterpret_cast( r ) - , reinterpret_cast( r->m_prev ) - , reinterpret_cast( r->m_next ) - , reinterpret_cast( r->m_alloc_ptr ) + //Formatting dependent on sizeof(uintptr_t) + const char * format_string; + + if (sizeof(uintptr_t) == sizeof(unsigned long)) { + format_string = "Cuda addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx + %.8ld ] count(%d) dealloc(0x%.12lx) %s\n"; + } + else if (sizeof(uintptr_t) == sizeof(unsigned long long)) { + format_string = "Cuda addr( 0x%.12llx ) list( 0x%.12llx 0x%.12llx ) extent[ 0x%.12llx + %.8ld ] count(%d) dealloc(0x%.12llx) %s\n"; + } + + snprintf( buffer , 256 + , format_string + , reinterpret_cast( r ) + , reinterpret_cast( r->m_prev ) + , reinterpret_cast( r->m_next ) + , reinterpret_cast( r->m_alloc_ptr ) , r->m_alloc_size , r->m_count - , reinterpret_cast( r->m_dealloc ) + , reinterpret_cast( r->m_dealloc ) , head.m_label ); std::cout << buffer ; @@ -612,8 +775,19 @@ print_records( std::ostream & s , const Kokkos::CudaSpace & space , bool detail Kokkos::Impl::DeepCopy::DeepCopy( & head , r->m_alloc_ptr , sizeof(SharedAllocationHeader) ); - snprintf( buffer , 256 , "Cuda [ 0x%.12lx + %ld ] %s\n" - , reinterpret_cast< unsigned long >( r->data() ) + //Formatting dependent on sizeof(uintptr_t) + const char * format_string; + + if (sizeof(uintptr_t) == sizeof(unsigned long)) { + format_string = "Cuda [ 0x%.12lx + %ld ] %s\n"; + } + else if (sizeof(uintptr_t) == sizeof(unsigned long long)) { + format_string = "Cuda [ 0x%.12llx + %ld ] %s\n"; + 
} + + snprintf( buffer , 256 + , format_string + , reinterpret_cast< uintptr_t >( r->data() ) , r->size() , head.m_label ); diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Alloc.hpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Alloc.hpp index e1314c0e51..5746176274 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Alloc.hpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Alloc.hpp @@ -71,7 +71,7 @@ shared_allocation_record( Kokkos::CudaSpace const & arg_space DestructFunctor * const functor = reinterpret_cast< DestructFunctor * >( - reinterpret_cast< unsigned long >( record ) + sizeof(SharedAllocationRecord) ); + reinterpret_cast< uintptr_t >( record ) + sizeof(SharedAllocationRecord) ); new( functor ) DestructFunctor( arg_destruct ); diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.cpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.cpp index 8c8c5e47a5..1f409dffaa 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.cpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.cpp @@ -43,6 +43,8 @@ #include +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + /* only compile this file if CUDA is enabled for Kokkos */ #ifdef KOKKOS_HAVE_CUDA @@ -56,6 +58,7 @@ namespace Kokkos { namespace Impl { /*--------------------------------------------------------------------------*/ + TextureAttribute::TextureAttribute( void * const alloc_ptr , size_t alloc_size , cudaChannelFormatDesc const & desc @@ -190,3 +193,6 @@ void * CudaHostAllocator::reallocate(void * old_ptr, size_t old_size, size_t new }} // namespace Kokkos::Impl #endif //KOKKOS_HAVE_CUDA + +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.hpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.hpp index 86fe1c901b..58445ab07b 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.hpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_BasicAllocators.hpp @@ -46,6 +46,8 @@ #include +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + /* only compile this file if CUDA is enabled for Kokkos */ #ifdef KOKKOS_HAVE_CUDA @@ -85,7 +87,6 @@ struct TextureAttribute : public AllocatorAttributeBase ~TextureAttribute(); }; - /// class CudaUnmanagedAllocator /// does nothing when deallocate(ptr,size) is called struct CudaUnmanagedAllocator @@ -184,4 +185,6 @@ public: #endif //KOKKOS_HAVE_CUDA +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + #endif //KOKKOS_CUDA_BASIC_ALLOCATORS_HPP diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Impl.cpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Impl.cpp index b7c3a62d39..de00b04152 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Impl.cpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Impl.cpp @@ -222,10 +222,14 @@ private: CudaInternal( const CudaInternal & ); CudaInternal & operator = ( const CudaInternal & ); +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + AllocationTracker m_scratchFlagsTracker; AllocationTracker m_scratchSpaceTracker; AllocationTracker m_scratchUnifiedTracker; +#endif + public: @@ -482,6 +486,32 @@ void CudaInternal::initialize( int cuda_device_id , int stream_count ) Kokkos::Impl::throw_runtime_exception( msg.str() ); } + #ifdef KOKKOS_CUDA_USE_UVM + if(!cuda_launch_blocking()) { + std::cout << "Kokkos::Cuda::initialize WARNING: Cuda is allocating into UVMSpace by default" << std::endl; + std::cout << " without setting CUDA_LAUNCH_BLOCKING=1." << std::endl; + std::cout << " The code must call Cuda::fence() after each kernel" << std::endl; + std::cout << " or will likely crash when accessing data on the host." 
<< std::endl; + } + + const char * env_force_device_alloc = getenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC"); + bool force_device_alloc; + if (env_force_device_alloc == 0) force_device_alloc=false; + else force_device_alloc=atoi(env_force_device_alloc)!=0; + + const char * env_visible_devices = getenv("CUDA_VISIBLE_DEVICES"); + bool visible_devices_one=true; + if (env_visible_devices == 0) visible_devices_one=false; + + if(!visible_devices_one && !force_device_alloc) { + std::cout << "Kokkos::Cuda::initialize WARNING: Cuda is allocating into UVMSpace by default" << std::endl; + std::cout << " without setting CUDA_MANAGED_FORCE_DEVICE_ALLOC=1 or " << std::endl; + std::cout << " setting CUDA_VISIBLE_DEVICES." << std::endl; + std::cout << " This could on multi GPU systems lead to severe performance" << std::endl; + std::cout << " penalties." << std::endl; + } + #endif + // Init the array for used for arbitrarily sized atomics Impl::init_lock_array_cuda_space(); @@ -501,9 +531,27 @@ CudaInternal::scratch_flags( const Cuda::size_type size ) m_scratchFlagsCount = ( size + sizeScratchGrain - 1 ) / sizeScratchGrain ; +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + m_scratchFlagsTracker = CudaSpace::allocate_and_track( std::string("InternalScratchFlags") , sizeof( ScratchGrain ) * m_scratchFlagsCount ); + m_scratchFlags = reinterpret_cast(m_scratchFlagsTracker.alloc_ptr()); +#else + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::CudaSpace , void > Record ; + + Record * const r = Record::allocate( Kokkos::CudaSpace() + , "InternalScratchFlags" + , ( sizeof( ScratchGrain ) * m_scratchFlagsCount ) ); + + Record::increment( r ); + + m_scratchFlags = reinterpret_cast( r->data() ); + +#endif + + CUDA_SAFE_CALL( cudaMemset( m_scratchFlags , 0 , m_scratchFlagsCount * sizeScratchGrain ) ); } @@ -517,9 +565,26 @@ CudaInternal::scratch_space( const Cuda::size_type size ) m_scratchSpaceCount = ( size + sizeScratchGrain - 1 ) / sizeScratchGrain ; +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + m_scratchSpaceTracker = CudaSpace::allocate_and_track( std::string("InternalScratchSpace") , sizeof( ScratchGrain ) * m_scratchSpaceCount ); + m_scratchSpace = reinterpret_cast(m_scratchSpaceTracker.alloc_ptr()); +#else + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::CudaSpace , void > Record ; + + Record * const r = Record::allocate( Kokkos::CudaSpace() + , "InternalScratchSpace" + , ( sizeof( ScratchGrain ) * m_scratchSpaceCount ) ); + + Record::increment( r ); + + m_scratchSpace = reinterpret_cast( r->data() ); + +#endif + } return m_scratchSpace ; @@ -533,8 +598,26 @@ CudaInternal::scratch_unified( const Cuda::size_type size ) m_scratchUnifiedCount = ( size + sizeScratchGrain - 1 ) / sizeScratchGrain ; +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + m_scratchUnifiedTracker = CudaHostPinnedSpace::allocate_and_track( std::string("InternalScratchUnified") , sizeof( ScratchGrain ) * m_scratchUnifiedCount ); + m_scratchUnified = reinterpret_cast( m_scratchUnifiedTracker.alloc_ptr() ); + +#else + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::CudaHostPinnedSpace , void > Record ; + + Record * const r = Record::allocate( Kokkos::CudaHostPinnedSpace() + , "InternalScratchUnified" + , ( sizeof( ScratchGrain ) * m_scratchUnifiedCount ) ); + + Record::increment( r ); + + m_scratchUnified = reinterpret_cast( r->data() ); + +#endif + } return m_scratchUnified ; @@ -555,10 +638,23 @@ void CudaInternal::finalize() ::free( m_stream ); } +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + m_scratchSpaceTracker.clear(); m_scratchFlagsTracker.clear(); m_scratchUnifiedTracker.clear(); +#else + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< CudaSpace > RecordCuda ; + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< CudaHostPinnedSpace > RecordHost ; + + RecordCuda::decrement( RecordCuda::get_record( m_scratchFlags ) ); + RecordCuda::decrement( RecordCuda::get_record( m_scratchSpace ) ); + RecordHost::decrement( RecordHost::get_record( m_scratchUnified ) ); + +#endif + m_cudaDev = -1 ; m_maxWarpCount = 0 ; m_maxBlock = 0 ; diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Internal.hpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Internal.hpp index dd8a08729b..328857d997 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Internal.hpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Internal.hpp @@ -43,7 +43,7 @@ #ifndef KOKKOS_CUDA_INTERNAL_HPP #define KOKKOS_CUDA_INTERNAL_HPP - +#include #include /* only compile this file if CUDA is enabled for Kokkos */ @@ -53,18 +53,21 @@ namespace Kokkos { namespace Impl { +template +struct CudaGetMaxBlockSize; + +template +int cuda_get_max_block_size(const typename DriverType::functor_type & f, const size_t vector_length, const size_t shmem_extra) { + return CudaGetMaxBlockSize::get_block_size(f,vector_length, shmem_extra); +} + template -int cuda_get_max_block_size(const typename DriverType::functor_type & f) { -#if ( CUDA_VERSION < 6050 ) - return 256; -#else - bool Large = ( CudaTraits::ConstantMemoryUseThreshold < sizeof(DriverType) ); - - int numBlocks; - if(Large) { +struct CudaGetMaxBlockSize { + static int get_block_size(const typename DriverType::functor_type & f, const size_t vector_length, const size_t shmem_extra) { + int numBlocks; int blockSize=32; - int sharedmem = FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize ); + int sharedmem = shmem_extra + FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , 
blockSize/vector_length ); cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, cuda_parallel_launch_constant_memory, @@ -73,7 +76,7 @@ int cuda_get_max_block_size(const typename DriverType::functor_type & f) { while (blockSize<1024 && numBlocks>0) { blockSize*=2; - sharedmem = FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize ); + sharedmem = shmem_extra + FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize/vector_length); cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, @@ -83,9 +86,16 @@ int cuda_get_max_block_size(const typename DriverType::functor_type & f) { } if(numBlocks>0) return blockSize; else return blockSize/2; - } else { + } +}; + +template +struct CudaGetMaxBlockSize { + static int get_block_size(const typename DriverType::functor_type & f, const size_t vector_length, const size_t shmem_extra) { + int numBlocks; + int blockSize=32; - int sharedmem = FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize ); + int sharedmem = shmem_extra + FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize/vector_length ); cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, cuda_parallel_launch_local_memory, @@ -94,7 +104,7 @@ int cuda_get_max_block_size(const typename DriverType::functor_type & f) { while (blockSize<1024 && numBlocks>0) { blockSize*=2; - sharedmem = FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize ); + sharedmem = shmem_extra + FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize/vector_length ); cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, @@ -105,42 +115,58 @@ int cuda_get_max_block_size(const typename DriverType::functor_type & f) { if(numBlocks>0) return blockSize; else return blockSize/2; } -#endif +}; + + + +template +struct CudaGetOptBlockSize; + +template +int cuda_get_opt_block_size(const typename DriverType::functor_type & f, const size_t 
vector_length, const size_t shmem_extra) { + return CudaGetOptBlockSize::get_block_size(f,vector_length,shmem_extra); } template -int cuda_get_opt_block_size(const typename DriverType::functor_type & f) { -#if ( CUDA_VERSION < 6050 ) - return 256; -#else - bool Large = ( CudaTraits::ConstantMemoryUseThreshold < sizeof(DriverType) ); +struct CudaGetOptBlockSize { + static int get_block_size(const typename DriverType::functor_type & f, const size_t vector_length, const size_t shmem_extra) { + int blockSize=16; + int numBlocks; + int sharedmem; + int maxOccupancy=0; + int bestBlockSize=0; - int blockSize=16; - int numBlocks; - int sharedmem; - int maxOccupancy=0; - int bestBlockSize=0; - - if(Large) { while(blockSize<1024) { blockSize*=2; //calculate the occupancy with that optBlockSize and check whether its larger than the largest one found so far - sharedmem = FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize ); + sharedmem = shmem_extra + FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize/vector_length ); cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, cuda_parallel_launch_constant_memory, blockSize, sharedmem); if(maxOccupancy < numBlocks*blockSize) { - maxOccupancy = numBlocks*blockSize; - bestBlockSize = blockSize; + maxOccupancy = numBlocks*blockSize; + bestBlockSize = blockSize; } } - } else { + return bestBlockSize; + } +}; + +template +struct CudaGetOptBlockSize { + static int get_block_size(const typename DriverType::functor_type & f, const size_t vector_length, const size_t shmem_extra) { + int blockSize=16; + int numBlocks; + int sharedmem; + int maxOccupancy=0; + int bestBlockSize=0; + while(blockSize<1024) { blockSize*=2; - sharedmem = FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize ); + sharedmem = shmem_extra + FunctorTeamShmemSize< typename DriverType::functor_type >::value( f , blockSize/vector_length ); 
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, @@ -153,10 +179,9 @@ int cuda_get_opt_block_size(const typename DriverType::functor_type & f) { bestBlockSize = blockSize; } } + return bestBlockSize; } - return bestBlockSize; -#endif -} +}; }} // namespace Kokkos::Impl diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Parallel.hpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Parallel.hpp index 3aea9be1d9..003aac277c 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Parallel.hpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_Parallel.hpp @@ -45,6 +45,7 @@ #define KOKKOS_CUDA_PARALLEL_HPP #include +#include #include #include @@ -252,6 +253,7 @@ private: const int m_league_size ; const int m_team_size ; const int m_vector_length ; + const size_t m_scratch_size ; public: @@ -306,43 +308,135 @@ public: inline int vector_length() const { return m_vector_length ; } inline int team_size() const { return m_team_size ; } inline int league_size() const { return m_league_size ; } + inline size_t scratch_size() const { return m_scratch_size ; } /** \brief Specify league size, request team size */ - TeamPolicy( execution_space & , int league_size_ , int team_size_request , int vector_length_request = 1 ) + TeamPolicy( execution_space & + , int league_size_ + , int team_size_request + , int vector_length_request = 1 ) : m_league_size( league_size_ ) , m_team_size( team_size_request ) - , m_vector_length ( vector_length_request ) + , m_vector_length( vector_length_request ) + , m_scratch_size ( 0 ) { // Allow only power-of-two vector_length - int check = 0; - for(int k = 1; k <= vector_length_max(); k*=2) - if(k == vector_length_request) - check = 1; - if(!check) + if ( ! 
Kokkos::Impl::is_integral_power_of_two( vector_length_request ) ) { Impl::throw_runtime_exception( "Requested non-power-of-two vector length for TeamPolicy."); + } + + // Make sure league size is permissable + if(league_size_ >= int(Impl::cuda_internal_maximum_grid_count())) + Impl::throw_runtime_exception( "Requested too large league_size for TeamPolicy on Cuda execution space."); + + // Make sure total block size is permissable + if ( m_team_size * m_vector_length > 1024 ) { + Impl::throw_runtime_exception(std::string("Kokkos::TeamPolicy< Cuda > the team size is too large. Team size x vector length must be smaller than 1024.")); + } + } + + /** \brief Specify league size, request team size */ + TeamPolicy( execution_space & + , int league_size_ + , const Kokkos::AUTO_t & /* team_size_request */ + , int vector_length_request = 1 ) + : m_league_size( league_size_ ) + , m_team_size( -1 ) + , m_vector_length( vector_length_request ) + , m_scratch_size ( 0 ) + { + // Allow only power-of-two vector_length + if ( ! Kokkos::Impl::is_integral_power_of_two( vector_length_request ) ) { + Impl::throw_runtime_exception( "Requested non-power-of-two vector length for TeamPolicy."); + } // Make sure league size is permissable if(league_size_ >= int(Impl::cuda_internal_maximum_grid_count())) Impl::throw_runtime_exception( "Requested too large league_size for TeamPolicy on Cuda execution space."); } - TeamPolicy( int league_size_ , int team_size_request , int vector_length_request = 1 ) + TeamPolicy( int league_size_ + , int team_size_request + , int vector_length_request = 1 ) : m_league_size( league_size_ ) , m_team_size( team_size_request ) , m_vector_length ( vector_length_request ) + , m_scratch_size ( 0 ) { // Allow only power-of-two vector_length - int check = 0; - for(int k = 1; k <= vector_length_max(); k*=2) - if(k == vector_length_request) - check = 1; - if(!check) + if ( ! 
Kokkos::Impl::is_integral_power_of_two( vector_length_request ) ) { Impl::throw_runtime_exception( "Requested non-power-of-two vector length for TeamPolicy."); + } // Make sure league size is permissable if(league_size_ >= int(Impl::cuda_internal_maximum_grid_count())) Impl::throw_runtime_exception( "Requested too large league_size for TeamPolicy on Cuda execution space."); + // Make sure total block size is permissable + if ( m_team_size * m_vector_length > 1024 ) { + Impl::throw_runtime_exception(std::string("Kokkos::TeamPolicy< Cuda > the team size is too large. Team size x vector length must be smaller than 1024.")); + } + } + + TeamPolicy( int league_size_ + , const Kokkos::AUTO_t & /* team_size_request */ + , int vector_length_request = 1 ) + : m_league_size( league_size_ ) + , m_team_size( -1 ) + , m_vector_length ( vector_length_request ) + , m_scratch_size ( 0 ) + { + // Allow only power-of-two vector_length + if ( ! Kokkos::Impl::is_integral_power_of_two( vector_length_request ) ) { + Impl::throw_runtime_exception( "Requested non-power-of-two vector length for TeamPolicy."); + } + + // Make sure league size is permissable + if(league_size_ >= int(Impl::cuda_internal_maximum_grid_count())) + Impl::throw_runtime_exception( "Requested too large league_size for TeamPolicy on Cuda execution space."); + } + + template + TeamPolicy( int league_size_ + , int team_size_request + , const Experimental::TeamScratchRequest & scratch_request ) + : m_league_size( league_size_ ) + , m_team_size( team_size_request ) + , m_vector_length( 1 ) + , m_scratch_size(scratch_request.total(team_size_request)) + { + // Allow only power-of-two vector_length + if ( ! 
Kokkos::Impl::is_integral_power_of_two( m_vector_length ) ) { + Impl::throw_runtime_exception( "Requested non-power-of-two vector length for TeamPolicy."); + } + + // Make sure league size is permissable + if(league_size_ >= int(Impl::cuda_internal_maximum_grid_count())) + Impl::throw_runtime_exception( "Requested too large league_size for TeamPolicy on Cuda execution space."); + + // Make sure total block size is permissable + if ( m_team_size * m_vector_length > 1024 ) { + Impl::throw_runtime_exception(std::string("Kokkos::TeamPolicy< Cuda > the team size is too large. Team size x vector length must be smaller than 1024.")); + } + } + + template + TeamPolicy( int league_size_ + , const Kokkos::AUTO_t & /* team_size_request */ + , const Experimental::TeamScratchRequest & scratch_request ) + : m_league_size( league_size_ ) + , m_team_size( 256 ) + , m_vector_length ( 1 ) + , m_scratch_size(scratch_request.total(2356)) + { + // Allow only power-of-two vector_length + if ( ! Kokkos::Impl::is_integral_power_of_two( m_vector_length ) ) { + Impl::throw_runtime_exception( "Requested non-power-of-two vector length for TeamPolicy."); + } + + // Make sure league size is permissable + if(league_size_ >= int(Impl::cuda_internal_maximum_grid_count())) + Impl::throw_runtime_exception( "Requested too large league_size for TeamPolicy on Cuda execution space."); } typedef Kokkos::Impl::CudaTeamMember member_type ; @@ -357,35 +451,33 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelFor< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > > +class ParallelFor< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > Policy ; + typedef typename Policy::member_type Member ; + typedef typename Policy::work_tag WorkTag ; const FunctorType m_functor ; const Policy m_policy ; - ParallelFor(); - 
ParallelFor & operator = ( const ParallelFor & ); + ParallelFor() = delete ; + ParallelFor & operator = ( const ParallelFor & ) = delete ; - template< class Tag > - inline static - __device__ - void driver( const FunctorType & functor - , typename Impl::enable_if< Impl::is_same< Tag , void >::value - , typename Policy::member_type const & >::type iwork - ) - { functor( iwork ); } + template< class TagType > + inline __device__ + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const Member i ) const + { m_functor( i ); } - template< class Tag > - inline static - __device__ - void driver( const FunctorType & functor - , typename Impl::enable_if< ! Impl::is_same< Tag , void >::value - , typename Policy::member_type const & >::type iwork - ) - { functor( Tag() , iwork ); } + template< class TagType > + inline __device__ + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const Member i ) const + { m_functor( TagType() , i ); } public: @@ -395,37 +487,44 @@ public: __device__ void operator()(void) const { - const typename Policy::member_type work_stride = blockDim.y * gridDim.x ; - const typename Policy::member_type work_end = m_policy.end(); + const Member work_stride = blockDim.y * gridDim.x ; + const Member work_end = m_policy.end(); - for ( typename Policy::member_type + for ( Member iwork = m_policy.begin() + threadIdx.y + blockDim.y * blockIdx.x ; iwork < work_end ; iwork += work_stride ) { - ParallelFor::template driver< typename Policy::work_tag >( m_functor, iwork ); + this-> template exec_range< WorkTag >( iwork ); } } - ParallelFor( const FunctorType & functor , - const Policy & policy ) - : m_functor( functor ) - , m_policy( policy ) + inline + void execute() const { + const int nwork = m_policy.end() - m_policy.begin(); const dim3 block( 1 , CudaTraits::WarpSize * cuda_internal_maximum_warp_count(), 1); - const dim3 grid( std::min( ( int( policy.end() - policy.begin() ) + block.y - 1 
) / block.y - , cuda_internal_maximum_grid_count() ) - , 1 , 1); + const dim3 grid( std::min( ( nwork + block.y - 1 ) / block.y , cuda_internal_maximum_grid_count() ) , 1 , 1); CudaParallelLaunch< ParallelFor >( *this , grid , block , 0 ); } + + ParallelFor( const FunctorType & arg_functor , + const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + { } }; template< class FunctorType , class Arg0 , class Arg1 > -class ParallelFor< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Cuda > > +class ParallelFor< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Cuda > + > { private: typedef Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Cuda > Policy ; + typedef typename Policy::member_type Member ; + typedef typename Policy::work_tag WorkTag ; public: @@ -442,20 +541,22 @@ private: // const FunctorType m_functor ; - size_type m_shmem_begin ; - size_type m_shmem_size ; - size_type m_league_size ; + const size_type m_league_size ; + const size_type m_team_size ; + const size_type m_vector_size ; + const size_type m_shmem_begin ; + const size_type m_shmem_size ; template< class TagType > __device__ inline - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member ) const + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const Member & member ) const { m_functor( member ); } template< class TagType > __device__ inline - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member ) const + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const Member & member ) const { m_functor( TagType() , member ); } public: @@ -466,7 +567,7 @@ public: // Iterate this block through the league for ( int league_rank = blockIdx.x ; league_rank < m_league_size ; league_rank += gridDim.x ) { - ParallelFor::template driver< typename Policy::work_tag >( + this-> template exec_team< WorkTag >( typename Policy::member_type( kokkos_impl_cuda_shared_memory() , m_shmem_begin , m_shmem_size @@ -475,28 +576,42 @@ public: } } + inline + void execute() const + { + const int shmem_size_total = m_shmem_begin + m_shmem_size ; + const dim3 grid( int(m_league_size) , 1 , 1 ); + const dim3 block( int(m_vector_size) , int(m_team_size) , 1 ); - ParallelFor( const FunctorType & functor - , const Policy & policy - ) - : m_functor( functor ) - , m_shmem_begin( sizeof(double) * ( policy.team_size() + 2 ) ) - , m_shmem_size( FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ) - , m_league_size( policy.league_size() ) - { - // Functor's reduce memory, team scan memory, and team shared memory depend upon team size. + CudaParallelLaunch< ParallelFor >( *this, grid, block, shmem_size_total ); // copy to device and execute - const int shmem_size_total = m_shmem_begin + m_shmem_size ; - - if ( CudaTraits::SharedMemoryCapacity < shmem_size_total ) { - Kokkos::Impl::throw_runtime_exception(std::string("Kokkos::Impl::ParallelFor< Cuda > insufficient shared memory")); } - const dim3 grid( int(policy.league_size()) , 1 , 1 ); - const dim3 block( policy.vector_length() , policy.team_size() , 1 ); + ParallelFor( const FunctorType & arg_functor + , const Policy & arg_policy + ) + : m_functor( arg_functor ) + , m_league_size( arg_policy.league_size() ) + , m_team_size( 0 <= arg_policy.team_size() ? 
arg_policy.team_size() : + Kokkos::Impl::cuda_get_opt_block_size< ParallelFor >( arg_functor , arg_policy.vector_length(), arg_policy.scratch_size() ) / arg_policy.vector_length() ) + , m_vector_size( arg_policy.vector_length() ) + , m_shmem_begin( sizeof(double) * ( m_team_size + 2 ) ) + , m_shmem_size( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( m_functor , m_team_size ) ) + { + // Functor's reduce memory, team scan memory, and team shared memory depend upon team size. - CudaParallelLaunch< ParallelFor >( *this, grid, block, shmem_size_total ); // copy to device and execute - } + const int shmem_size_total = m_shmem_begin + m_shmem_size ; + + if ( CudaTraits::SharedMemoryCapacity < shmem_size_total ) { + Kokkos::Impl::throw_runtime_exception(std::string("Kokkos::Impl::ParallelFor< Cuda > insufficient shared memory")); + } + + if ( m_team_size > + Kokkos::Impl::cuda_get_max_block_size< ParallelFor > + ( arg_functor , arg_policy.vector_length(), arg_policy.scratch_size() ) / arg_policy.vector_length()) { + Kokkos::Impl::throw_runtime_exception(std::string("Kokkos::Impl::ParallelFor< Cuda > requested too large team size.")); + } + } }; } // namespace Impl @@ -509,15 +624,20 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelReduce< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > > +class ParallelReduce< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > + > { private: typedef Kokkos::RangePolicy Policy ; - typedef typename Policy::WorkRange work_range ; - typedef typename Policy::work_tag work_tag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , work_tag > ValueInit ; + + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + + typedef 
Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; public: @@ -529,40 +649,27 @@ public: // Algorithmic constraints: blockSize is a power of two AND blockDim.y == blockDim.z == 1 - const FunctorType m_functor ; - const Policy m_policy ; - size_type * m_scratch_space ; - size_type * m_scratch_flags ; - size_type * m_unified_space ; + const FunctorType m_functor ; + const Policy m_policy ; + const pointer_type m_result_ptr ; + size_type * m_scratch_space ; + size_type * m_scratch_flags ; + size_type * m_unified_space ; - // Determine block size constrained by shared memory: - static inline - unsigned local_block_size( const FunctorType & f ) - { - unsigned n = CudaTraits::WarpSize * 8 ; - while ( n && CudaTraits::SharedMemoryCapacity < cuda_single_inter_block_reduce_scan_shmem( f , n ) ) { n >>= 1 ; } - return n ; - } + template< class TagType > + __device__ inline + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const Member & i , reference_type update ) const + { m_functor( i , update ); } - template< class Tag > - inline static - __device__ - void driver( const FunctorType & functor - , typename Impl::enable_if< Impl::is_same< Tag , void >::value - , typename Policy::member_type const & >::type iwork - , reference_type value ) - { functor( iwork , value ); } + template< class TagType > + __device__ inline + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const Member & i , reference_type update ) const + { m_functor( TagType() , i , update ); } - template< class Tag > - inline static - __device__ - void driver( const FunctorType & functor - , typename Impl::enable_if< ! Impl::is_same< Tag , void >::value - , typename Policy::member_type const & >::type iwork - , reference_type value ) - { functor( Tag() , iwork , value ); } +#if ! 
defined( KOKKOS_EXPERIMENTAL_CUDA_SHFL_REDUCTION ) -#ifndef KOKKOS_EXPERIMENTAL_CUDA_SHFL_REDUCTION __device__ inline void operator()(void) const { @@ -578,16 +685,16 @@ public: // Accumulate the values for this block. // The accumulation ordering does not match the final pass, but is arithmatically equivalent. - const work_range range( m_policy , blockIdx.x , gridDim.x ); + const WorkRange range( m_policy , blockIdx.x , gridDim.x ); - for ( typename work_range::member_type iwork = range.begin() + threadIdx.y , iwork_end = range.end() ; + for ( Member iwork = range.begin() + threadIdx.y , iwork_end = range.end() ; iwork < iwork_end ; iwork += blockDim.y ) { - ParallelReduce::template driver< work_tag >( m_functor , iwork , value ); + this-> template exec_range< WorkTag >( iwork , value ); } } // Reduce with final value at blockDim.y - 1 location. - if ( cuda_single_inter_block_reduce_scan( + if ( cuda_single_inter_block_reduce_scan( m_functor , blockIdx.x , gridDim.x , kokkos_impl_cuda_shared_memory() , m_scratch_space , m_scratch_flags ) ) { @@ -597,7 +704,7 @@ public: size_type * const global = m_unified_space ? 
m_unified_space : m_scratch_space ; if ( threadIdx.y == 0 ) { - Kokkos::Impl::FunctorFinal< FunctorType , work_tag >::final( m_functor , shared ); + Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( m_functor , shared ); } if ( CudaTraits::WarpSize < word_count.value ) { __syncthreads(); } @@ -605,7 +712,9 @@ public: for ( unsigned i = threadIdx.y ; i < word_count.value ; i += blockDim.y ) { global[i] = shared[i]; } } } -#else + +#else /* defined( KOKKOS_EXPERIMENTAL_CUDA_SHFL_REDUCTION ) */ + __device__ inline void operator()(void) const { @@ -619,9 +728,9 @@ public: const Policy range( m_policy , blockIdx.x , gridDim.x ); - for ( typename Policy::member_type iwork = range.begin() + threadIdx.y , iwork_end = range.end() ; + for ( Member iwork = range.begin() + threadIdx.y , iwork_end = range.end() ; iwork < iwork_end ; iwork += blockDim.y ) { - ParallelReduce::template driver< work_tag >( m_functor , iwork , value ); + this-> template exec_range< WorkTag >( iwork , value ); } pointer_type const result = (pointer_type) (m_unified_space ? 
m_unified_space : m_scratch_space) ; @@ -631,68 +740,92 @@ public: (value,Impl::JoinAdd(),m_scratch_space,result,m_scratch_flags,max_active_thread)) { const unsigned id = threadIdx.y*blockDim.x + threadIdx.x; if(id==0) { - Kokkos::Impl::FunctorFinal< FunctorType , work_tag >::final( m_functor , (void*) &value ); + Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( m_functor , (void*) &value ); *result = value; } } } + #endif - template< class HostViewType > - ParallelReduce( const FunctorType & functor - , const Policy & policy - , const HostViewType & result - ) - : m_functor( functor ) - , m_policy( policy ) - , m_scratch_space( 0 ) - , m_scratch_flags( 0 ) - , m_unified_space( 0 ) - { - const int block_size = local_block_size( functor ); - const int block_count = std::min( int(block_size) - , ( int(policy.end() - policy.begin()) + block_size - 1 ) / block_size - ); - m_scratch_space = cuda_internal_scratch_space( ValueTraits::value_size( functor ) * block_count ); - m_scratch_flags = cuda_internal_scratch_flags( sizeof(size_type) ); - m_unified_space = cuda_internal_scratch_unified( ValueTraits::value_size( functor ) ); + // Determine block size constrained by shared memory: + static inline + unsigned local_block_size( const FunctorType & f ) + { + unsigned n = CudaTraits::WarpSize * 8 ; + while ( n && CudaTraits::SharedMemoryCapacity < cuda_single_inter_block_reduce_scan_shmem( f , n ) ) { n >>= 1 ; } + return n ; + } + + inline + void execute() + { + const int block_size = local_block_size( m_functor ); + + m_scratch_space = cuda_internal_scratch_space( ValueTraits::value_size( m_functor ) * block_size /* block_size == max block_count */ ); + m_scratch_flags = cuda_internal_scratch_flags( sizeof(size_type) ); + m_unified_space = cuda_internal_scratch_unified( ValueTraits::value_size( m_functor ) ); + + const int nwork = m_policy.end() - m_policy.begin(); + // REQUIRED ( 1 , N , 1 ) + const dim3 block( 1 , block_size , 1 ); + // Required grid.x <= 
block.y + const dim3 grid( std::min( int(block.y) , int( ( nwork + block.y - 1 ) / block.y ) ) , 1 , 1 ); - const dim3 grid( block_count , 1 , 1 ); - const dim3 block( 1 , block_size , 1 ); // REQUIRED DIMENSIONS ( 1 , N , 1 ) #ifdef KOKKOS_EXPERIMENTAL_CUDA_SHFL_REDUCTION const int shmem = 0; #else - const int shmem = cuda_single_inter_block_reduce_scan_shmem( m_functor , block.y ); + const int shmem = cuda_single_inter_block_reduce_scan_shmem( m_functor , block.y ); #endif CudaParallelLaunch< ParallelReduce >( *this, grid, block, shmem ); // copy to device and execute Cuda::fence(); - if ( result.ptr_on_device() ) { + if ( m_result_ptr ) { if ( m_unified_space ) { const int count = ValueTraits::value_count( m_functor ); - for ( int i = 0 ; i < count ; ++i ) { result.ptr_on_device()[i] = pointer_type(m_unified_space)[i] ; } + for ( int i = 0 ; i < count ; ++i ) { m_result_ptr[i] = pointer_type(m_unified_space)[i] ; } } else { const int size = ValueTraits::value_size( m_functor ); - DeepCopy( result.ptr_on_device() , m_scratch_space , size ); + DeepCopy( m_result_ptr , m_scratch_space , size ); } } } + + template< class HostViewType > + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const HostViewType & arg_result + ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result.ptr_on_device() ) + , m_scratch_space( 0 ) + , m_scratch_flags( 0 ) + , m_unified_space( 0 ) + { } }; +//---------------------------------------------------------------------------- + template< class FunctorType , class Arg0 , class Arg1 > -class ParallelReduce< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Cuda > > +class ParallelReduce< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Cuda > + > { private: - typedef Kokkos::TeamPolicy Policy ; - typedef typename Policy::work_tag work_tag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< 
FunctorType , work_tag > ValueInit ; - typedef typename ValueTraits::pointer_type pointer_type ; - typedef typename ValueTraits::reference_type reference_type ; + typedef Kokkos::TeamPolicy Policy ; + typedef typename Policy::member_type Member ; + typedef typename Policy::work_tag WorkTag ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; + + typedef typename ValueTraits::pointer_type pointer_type ; + typedef typename ValueTraits::reference_type reference_type ; public: @@ -709,27 +842,27 @@ private: // [ team shared space ] // - const FunctorType m_functor ; - size_type * m_scratch_space ; - size_type * m_scratch_flags ; - size_type * m_unified_space ; - size_type m_team_begin ; - size_type m_shmem_begin ; - size_type m_shmem_size ; - size_type m_league_size ; + const FunctorType m_functor ; + const pointer_type m_result_ptr ; + size_type * m_scratch_space ; + size_type * m_scratch_flags ; + size_type * m_unified_space ; + size_type m_team_begin ; + size_type m_shmem_begin ; + size_type m_shmem_size ; + const size_type m_league_size ; + const size_type m_team_size ; template< class TagType > __device__ inline - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member - , reference_type update ) const + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const Member & member , reference_type update ) const { m_functor( member , update ); } template< class TagType > __device__ inline - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member - , reference_type update ) const + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const Member & member , reference_type update ) const { m_functor( TagType() , member , update ); } public: @@ -745,9 +878,8 @@ public: // Iterate this block through the league for ( int league_rank = blockIdx.x ; league_rank < m_league_size ; league_rank += gridDim.x ) { - - ParallelReduce::template driver< work_tag > - ( typename Policy::member_type( kokkos_impl_cuda_shared_memory() + m_team_begin + this-> template exec_team< WorkTag > + ( Member( kokkos_impl_cuda_shared_memory() + m_team_begin , m_shmem_begin , m_shmem_size , league_rank @@ -756,7 +888,7 @@ public: } // Reduce with final value at blockDim.y - 1 location. - if ( cuda_single_inter_block_reduce_scan( + if ( cuda_single_inter_block_reduce_scan( m_functor , blockIdx.x , gridDim.x , kokkos_impl_cuda_shared_memory() , m_scratch_space , m_scratch_flags ) ) { @@ -766,7 +898,7 @@ public: size_type * const global = m_unified_space ? m_unified_space : m_scratch_space ; if ( threadIdx.y == 0 ) { - Kokkos::Impl::FunctorFinal< FunctorType , work_tag >::final( m_functor , shared ); + Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( m_functor , shared ); } if ( CudaTraits::WarpSize < word_count.value ) { __syncthreads(); } @@ -775,58 +907,85 @@ public: } } + inline + void execute() + { + const int block_count = std::min( m_league_size , m_team_size ); + + m_scratch_space = cuda_internal_scratch_space( ValueTraits::value_size( m_functor ) * block_count ); + m_scratch_flags = cuda_internal_scratch_flags( sizeof(size_type) ); + m_unified_space = cuda_internal_scratch_unified( ValueTraits::value_size( m_functor ) ); + + // REQUIRED DIMENSIONS ( 1 , N , 1 ) + const dim3 block( 1 , m_team_size , 1 ); + const dim3 grid( std::min( int(m_league_size) , int(m_team_size) ) , 1 , 1 ); + const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size ; + + CudaParallelLaunch< ParallelReduce >( *this, grid, block, shmem_size_total ); // copy to 
device and execute + + Cuda::fence(); + + if ( m_result_ptr ) { + if ( m_unified_space ) { + const int count = ValueTraits::value_count( m_functor ); + for ( int i = 0 ; i < count ; ++i ) { m_result_ptr[i] = pointer_type(m_unified_space)[i] ; } + } + else { + const int size = ValueTraits::value_size( m_functor ); + DeepCopy( m_result_ptr, m_scratch_space, size ); + } + } + } template< class HostViewType > - ParallelReduce( const FunctorType & functor - , const Policy & policy - , const HostViewType & result + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const HostViewType & arg_result ) - : m_functor( functor ) + : m_functor( arg_functor ) + , m_result_ptr( arg_result.ptr_on_device() ) , m_scratch_space( 0 ) , m_scratch_flags( 0 ) , m_unified_space( 0 ) - , m_team_begin( cuda_single_inter_block_reduce_scan_shmem( functor , policy.team_size() ) ) - , m_shmem_begin( sizeof(double) * ( policy.team_size() + 2 ) ) - , m_shmem_size( FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ) - , m_league_size( policy.league_size() ) + , m_team_begin( 0 ) + , m_shmem_begin( 0 ) + , m_shmem_size( 0 ) + , m_league_size( arg_policy.league_size() ) + , m_team_size( 0 <= arg_policy.team_size() ? 
arg_policy.team_size() : + Kokkos::Impl::cuda_get_opt_block_size< ParallelReduce >( arg_functor , arg_policy.vector_length(), arg_policy.scratch_size() ) / arg_policy.vector_length() ) { + // Return Init value if the number of worksets is zero + if( arg_policy.league_size() == 0) { + ValueInit::init( m_functor , arg_result.ptr_on_device() ); + return ; + } + + m_team_begin = cuda_single_inter_block_reduce_scan_shmem( arg_functor , m_team_size ); + m_shmem_begin = sizeof(double) * ( m_team_size + 2 ); + m_shmem_size = arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( arg_functor , m_team_size ); // The global parallel_reduce does not support vector_length other than 1 at the moment - if(policy.vector_length() > 1) + if( arg_policy.vector_length() > 1) Impl::throw_runtime_exception( "Kokkos::parallel_reduce with a TeamPolicy using a vector length of greater than 1 is not currently supported for CUDA."); + if( m_team_size < 32) + Impl::throw_runtime_exception( "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller than 32 is not currently supported with CUDA."); + // Functor's reduce memory, team scan memory, and team shared memory depend upon team size. const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size ; - const int not_power_of_two = 0 != ( policy.team_size() & ( policy.team_size() - 1 ) ); - if ( not_power_of_two || CudaTraits::SharedMemoryCapacity < shmem_size_total ) { + if ( ! 
Kokkos::Impl::is_integral_power_of_two( m_team_size ) || + CudaTraits::SharedMemoryCapacity < shmem_size_total ) { Kokkos::Impl::throw_runtime_exception(std::string("Kokkos::Impl::ParallelReduce< Cuda > bad team size")); } - const int block_count = std::min( policy.league_size() , policy.team_size() ); - - m_scratch_space = cuda_internal_scratch_space( ValueTraits::value_size( functor ) * block_count ); - m_scratch_flags = cuda_internal_scratch_flags( sizeof(size_type) ); - m_unified_space = cuda_internal_scratch_unified( ValueTraits::value_size( functor ) ); - - const dim3 grid( block_count , 1 , 1 ); - const dim3 block( 1 , policy.team_size() , 1 ); // REQUIRED DIMENSIONS ( 1 , N , 1 ) - - CudaParallelLaunch< ParallelReduce >( *this, grid, block, shmem_size_total ); // copy to device and execute - - Cuda::fence(); - - if ( result.ptr_on_device() ) { - if ( m_unified_space ) { - const int count = ValueTraits::value_count( m_functor ); - for ( int i = 0 ; i < count ; ++i ) { result.ptr_on_device()[i] = pointer_type(m_unified_space)[i] ; } - } - else { - const int size = ValueTraits::value_size( m_functor ); - DeepCopy( result.ptr_on_device() , m_scratch_space , size ); - } + if ( m_team_size > + Kokkos::Impl::cuda_get_max_block_size< ParallelReduce > + ( arg_functor , arg_policy.vector_length(), arg_policy.scratch_size() ) / arg_policy.vector_length()) { + Kokkos::Impl::throw_runtime_exception(std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too large team size.")); } + } }; @@ -840,68 +999,53 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelScan< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > > +class ParallelScan< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Cuda > + > { private: - typedef Kokkos::RangePolicy Policy ; - typedef typename Policy::WorkRange work_range ; - typedef typename Policy::work_tag work_tag ; - typedef 
Kokkos::Impl::FunctorValueTraits< FunctorType , work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , work_tag > ValueInit ; - typedef Kokkos::Impl::FunctorValueOps< FunctorType , work_tag > ValueOps ; + typedef Kokkos::RangePolicy Policy ; + typedef typename Policy::member_type Member ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::WorkRange WorkRange ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; + typedef Kokkos::Impl::FunctorValueOps< FunctorType, WorkTag > ValueOps ; public: + typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; typedef FunctorType functor_type ; typedef Cuda::size_type size_type ; +private: + // Algorithmic constraints: // (a) blockDim.y is a power of two // (b) blockDim.y == blockDim.z == 1 // (c) gridDim.x <= blockDim.y * blockDim.y // (d) gridDim.y == gridDim.z == 1 - // Determine block size constrained by shared memory: - static inline - unsigned local_block_size( const FunctorType & f ) - { - // blockDim.y must be power of two = 128 (4 warps) or 256 (8 warps) or 512 (16 warps) - // gridDim.x <= blockDim.y * blockDim.y - // - // 4 warps was 10% faster than 8 warps and 20% faster than 16 warps in unit testing - - unsigned n = CudaTraits::WarpSize * 4 ; - while ( n && CudaTraits::SharedMemoryCapacity < cuda_single_inter_block_reduce_scan_shmem( f , n ) ) { n >>= 1 ; } - return n ; - } - const FunctorType m_functor ; const Policy m_policy ; size_type * m_scratch_space ; size_type * m_scratch_flags ; - size_type m_final ; - - template< class Tag > - inline static - __device__ - void driver( const FunctorType & functor - , typename Impl::enable_if< Impl::is_same< Tag , void >::value - , typename Policy::member_type const & >::type iwork - , reference_type value - , const bool final ) - { functor( iwork , value , 
final ); } + size_type m_final ; - template< class Tag > - inline static - __device__ - void driver( const FunctorType & functor - , typename Impl::enable_if< ! Impl::is_same< Tag , void >::value - , typename Policy::member_type const & >::type iwork - , reference_type value - , const bool final ) - { functor( Tag() , iwork , value , final ); } + template< class TagType > + __device__ inline + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const Member & i , reference_type update , const bool final ) const + { m_functor( i , update , final ); } + + template< class TagType > + __device__ inline + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const Member & i , reference_type update , const bool final ) const + { m_functor( TagType() , i , update , final ); } //---------------------------------------- @@ -920,18 +1064,17 @@ public: // Accumulate the values for this block. // The accumulation ordering does not match the final pass, but is arithmatically equivalent. - const work_range range( m_policy , blockIdx.x , gridDim.x ); + const WorkRange range( m_policy , blockIdx.x , gridDim.x ); - for ( typename Policy::member_type iwork = range.begin() + threadIdx.y , iwork_end = range.end() ; + for ( Member iwork = range.begin() + threadIdx.y , iwork_end = range.end() ; iwork < iwork_end ; iwork += blockDim.y ) { - ParallelScan::template driver< work_tag > - ( m_functor , iwork , ValueOps::reference( shared_value ) , false ); + this-> template exec_range< WorkTag >( iwork , ValueOps::reference( shared_value ) , false ); } // Reduce and scan, writing out scan of blocks' totals and block-groups' totals. // Blocks' scan values are written to 'blockIdx.x' location. 
// Block-groups' scan values are at: i = ( j * blockDim.y - 1 ) for i < gridDim.x - cuda_single_inter_block_reduce_scan( m_functor , blockIdx.x , gridDim.x , kokkos_impl_cuda_shared_memory() , m_scratch_space , m_scratch_flags ); + cuda_single_inter_block_reduce_scan( m_functor , blockIdx.x , gridDim.x , kokkos_impl_cuda_shared_memory() , m_scratch_space , m_scratch_flags ); } //---------------------------------------- @@ -956,7 +1099,7 @@ public: ValueInit::init( m_functor , shared_accum ); } - const work_range range( m_policy , blockIdx.x , gridDim.x ); + const WorkRange range( m_policy , blockIdx.x , gridDim.x ); for ( typename Policy::member_type iwork_base = range.begin(); iwork_base < range.end() ; iwork_base += blockDim.y ) { @@ -975,12 +1118,11 @@ public: // Call functor to accumulate inclusive scan value for this work item if ( iwork < range.end() ) { - ParallelScan::template driver< work_tag > - ( m_functor , iwork , ValueOps::reference( shared_prefix + word_count.value ) , false ); + this-> template exec_range< WorkTag >( iwork , ValueOps::reference( shared_prefix + word_count.value ) , false ); } // Scan block values into locations shared_data[1..blockDim.y] - cuda_intra_block_reduce_scan( m_functor , ValueTraits::pointer_type(shared_data+word_count.value) ); + cuda_intra_block_reduce_scan( m_functor , ValueTraits::pointer_type(shared_data+word_count.value) ); { size_type * const block_total = shared_data + word_count.value * blockDim.y ; @@ -989,12 +1131,13 @@ public: // Call functor with exclusive scan value if ( iwork < range.end() ) { - ParallelScan::template driver< work_tag > - ( m_functor , iwork , ValueOps::reference( shared_prefix ) , true ); + this-> template exec_range< WorkTag >( iwork , ValueOps::reference( shared_prefix ) , true ); } } } +public: + //---------------------------------------- __device__ inline @@ -1008,44 +1151,63 @@ public: } } - ParallelScan( const FunctorType & functor , - const Policy & policy ) - : m_functor( functor ) 
- , m_policy( policy ) + // Determine block size constrained by shared memory: + static inline + unsigned local_block_size( const FunctorType & f ) + { + // blockDim.y must be power of two = 128 (4 warps) or 256 (8 warps) or 512 (16 warps) + // gridDim.x <= blockDim.y * blockDim.y + // + // 4 warps was 10% faster than 8 warps and 20% faster than 16 warps in unit testing + + unsigned n = CudaTraits::WarpSize * 4 ; + while ( n && CudaTraits::SharedMemoryCapacity < cuda_single_inter_block_reduce_scan_shmem( f , n ) ) { n >>= 1 ; } + return n ; + } + + inline + void execute() + { + enum { GridMaxComputeCapability_2x = 0x0ffff }; + + const int block_size = local_block_size( m_functor ); + + const int grid_max = + ( block_size * block_size ) < GridMaxComputeCapability_2x ? + ( block_size * block_size ) : GridMaxComputeCapability_2x ; + + // At most 'max_grid' blocks: + const int nwork = m_policy.end() - m_policy.begin(); + const int max_grid = std::min( int(grid_max) , int(( nwork + block_size - 1 ) / block_size )); + + // How much work per block: + const int work_per_block = ( nwork + max_grid - 1 ) / max_grid ; + + // How many block are really needed for this much work: + const int grid_x = ( nwork + work_per_block - 1 ) / work_per_block ; + + m_scratch_space = cuda_internal_scratch_space( ValueTraits::value_size( m_functor ) * grid_x ); + m_scratch_flags = cuda_internal_scratch_flags( sizeof(size_type) * 1 ); + + const dim3 grid( grid_x , 1 , 1 ); + const dim3 block( 1 , block_size , 1 ); // REQUIRED DIMENSIONS ( 1 , N , 1 ) + const int shmem = ValueTraits::value_size( m_functor ) * ( block_size + 2 ); + + m_final = false ; + CudaParallelLaunch< ParallelScan >( *this, grid, block, shmem ); // copy to device and execute + + m_final = true ; + CudaParallelLaunch< ParallelScan >( *this, grid, block, shmem ); // copy to device and execute + } + + ParallelScan( const FunctorType & arg_functor , + const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( 
arg_policy ) , m_scratch_space( 0 ) , m_scratch_flags( 0 ) , m_final( false ) - { - enum { GridMaxComputeCapability_2x = 0x0ffff }; - - const int block_size = local_block_size( functor ); - - const int grid_max = ( block_size * block_size ) < GridMaxComputeCapability_2x ? - ( block_size * block_size ) : GridMaxComputeCapability_2x ; - - // At most 'max_grid' blocks: - const int nwork = policy.end() - policy.begin(); - const int max_grid = std::min( int(grid_max) , int(( nwork + block_size - 1 ) / block_size )); - - // How much work per block: - const int work_per_block = ( nwork + max_grid - 1 ) / max_grid ; - - // How many block are really needed for this much work: - const dim3 grid( ( nwork + work_per_block - 1 ) / work_per_block , 1 , 1 ); - const dim3 block( 1 , block_size , 1 ); // REQUIRED DIMENSIONS ( 1 , N , 1 ) - const int shmem = ValueTraits::value_size( functor ) * ( block_size + 2 ); - - m_scratch_space = cuda_internal_scratch_space( ValueTraits::value_size( functor ) * grid.x ); - m_scratch_flags = cuda_internal_scratch_flags( sizeof(size_type) * 1 ); - - m_final = false ; - CudaParallelLaunch< ParallelScan >( *this, grid, block, shmem ); // copy to device and execute - - m_final = true ; - CudaParallelLaunch< ParallelScan >( *this, grid, block, shmem ); // copy to device and execute - } - - void wait() const { Cuda::fence(); } + { } }; } // namespace Impl @@ -1519,7 +1681,7 @@ void parallel_reduce( const ExecPolicy & policy , const ViewType & result_view , const std::string& str = "" , typename Impl::enable_if< - ( Impl::is_view::value && ! Impl::is_integral< ExecPolicy >::value && + ( Kokkos::is_view::value && ! Impl::is_integral< ExecPolicy >::value && Impl::is_same::value )>::type * = 0 ) { @@ -1533,8 +1695,12 @@ void parallel_reduce( const ExecPolicy & policy Kokkos::Experimental::beginParallelScan("" == str ? 
typeid(FunctorType).name() : str, 0, &kpID); } #endif - - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( functor , policy , result_view ); + + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( functor , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -1551,7 +1717,7 @@ void parallel_reduce( const ExecPolicy & policy , ResultType& result_ref , const std::string& str = "" , typename Impl::enable_if< - ( ! Impl::is_view::value && + ( ! Kokkos::is_view::value && ! Impl::IsNonTrivialReduceFunctor::value && ! Impl::is_integral< ExecPolicy >::value && Impl::is_same::value )>::type * = 0 ) @@ -1583,7 +1749,11 @@ void parallel_reduce( const ExecPolicy & policy } #endif - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( FunctorType(functor_in) , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( FunctorType(functor_in) , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -1630,7 +1800,11 @@ void parallel_reduce( const ExecPolicy & policy } #endif - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( functor , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( functor , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -1646,7 +1820,7 @@ void parallel_reduce( const size_t work_count , const FunctorTypeIn & functor_in , const ViewType & result_view , const 
std::string& str = "" - , typename Impl::enable_if<( Impl::is_view::value && + , typename Impl::enable_if<( Kokkos::is_view::value && Impl::is_same< typename Impl::FunctorPolicyExecutionSpace< FunctorTypeIn , void >::execution_space, Kokkos::Cuda>::value @@ -1670,7 +1844,11 @@ void parallel_reduce( const size_t work_count } #endif - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( functor , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( functor , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -1687,7 +1865,7 @@ void parallel_reduce( const size_t work_count , const FunctorTypeIn & functor_in , ResultType& result , const std::string& str = "" - , typename Impl::enable_if< ! Impl::is_view::value && + , typename Impl::enable_if< ! Kokkos::is_view::value && ! 
Impl::IsNonTrivialReduceFunctor::value && Impl::is_same< typename Impl::FunctorPolicyExecutionSpace< FunctorTypeIn , void >::execution_space, @@ -1728,7 +1906,11 @@ void parallel_reduce( const size_t work_count } #endif - (void) Impl::ParallelReduce< FunctorType , ExecPolicy >( FunctorType(functor_in) , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType , ExecPolicy > closure( FunctorType(functor_in) , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -1783,7 +1965,11 @@ void parallel_reduce( const size_t work_count } #endif - (void) Impl::ParallelReduce< FunctorType , ExecPolicy >( functor , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType , ExecPolicy > closure( functor , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { diff --git a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp index 5ef16711ee..11871a6abc 100644 --- a/lib/kokkos/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp +++ b/lib/kokkos/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp @@ -117,7 +117,7 @@ inline void cuda_inter_warp_reduction( ValueType& value, value = result[0]; - for(int i = 1; (i*step<=max_active_thread) && i #include +#include #include #include @@ -89,6 +90,8 @@ struct AssertShapeBoundsAbort< CudaSpace > //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + namespace Kokkos { namespace Impl { @@ -419,6 +422,8 @@ public: } } +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- diff --git a/lib/kokkos/core/src/KokkosExp_View.hpp b/lib/kokkos/core/src/KokkosExp_View.hpp index fef76a4570..1fb11abde8 100644 --- a/lib/kokkos/core/src/KokkosExp_View.hpp +++ b/lib/kokkos/core/src/KokkosExp_View.hpp @@ -45,12 +45,14 @@ #define KOKKOS_EXPERIMENTAL_VIEW_HPP #include +#include #include #include #include #include #include +#include //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- @@ -59,17 +61,36 @@ namespace Kokkos { namespace Experimental { namespace Impl { +template< class DstMemorySpace , class SrcMemorySpace > +struct DeepCopy ; + template< class DataType > struct ViewArrayAnalysis ; -template< class DataType , class ValueType , class ArrayLayout > +template< class DataType , class ArrayLayout + , typename ValueType = + typename ViewArrayAnalysis< DataType >::non_const_value_type + > struct ViewDataAnalysis ; -template< class , class = void , typename Enable = void > -class ViewMapping { enum { is_assignable = false }; }; +template< class , class ... 
> +class ViewMapping { public: enum { is_assignable = false }; }; -template< class DstMemorySpace , class SrcMemorySpace > -struct DeepCopy ; +template< class MemorySpace > +struct ViewOperatorBoundsErrorAbort ; + +template<> +struct ViewOperatorBoundsErrorAbort< Kokkos::HostSpace > { + static void apply( const size_t rank + , const size_t n0 , const size_t n1 + , const size_t n2 , const size_t n3 + , const size_t n4 , const size_t n5 + , const size_t n6 , const size_t n7 + , const size_t i0 , const size_t i1 + , const size_t i2 , const size_t i3 + , const size_t i4 , const size_t i5 + , const size_t i6 , const size_t i7 ); +}; } /* namespace Impl */ } /* namespace Experimental */ @@ -87,102 +108,125 @@ namespace Experimental { * This is an implementation detail of View. It is only of interest * to developers implementing a new specialization of View. * - * Template argument permutations: - * - View< DataType , void , void , void > - * - View< DataType , Space , void , void > - * - View< DataType , Space , MemoryTraits , void > - * - View< DataType , Space , void , MemoryTraits > - * - View< DataType , ArrayLayout , void , void > - * - View< DataType , ArrayLayout , Space , void > - * - View< DataType , ArrayLayout , MemoryTraits , void > - * - View< DataType , ArrayLayout , Space , MemoryTraits > - * - View< DataType , MemoryTraits , void , void > + * Template argument options: + * - View< DataType > + * - View< DataType , Space > + * - View< DataType , Space , MemoryTraits > + * - View< DataType , ArrayLayout > + * - View< DataType , ArrayLayout , Space > + * - View< DataType , ArrayLayout , MemoryTraits > + * - View< DataType , ArrayLayout , Space , MemoryTraits > + * - View< DataType , MemoryTraits > */ -template< class DataType , - class Arg1 = void , - class Arg2 = void , - class Arg3 = void > -class ViewTraits { +template< class DataType , class ... 
Properties > +struct ViewTraits ; + +template<> +struct ViewTraits< void > +{ + typedef void execution_space ; + typedef void memory_space ; + typedef void array_layout ; + typedef void memory_traits ; +}; + +template< class ... Prop > +struct ViewTraits< void , void , Prop ... > +{ + // Ignore an extraneous 'void' + typedef typename ViewTraits::execution_space execution_space ; + typedef typename ViewTraits::memory_space memory_space ; + typedef typename ViewTraits::array_layout array_layout ; + typedef typename ViewTraits::memory_traits memory_traits ; +}; + +template< class ArrayLayout , class ... Prop > +struct ViewTraits< typename std::enable_if< Kokkos::Impl::is_array_layout::value >::type , ArrayLayout , Prop ... > +{ + // Specify layout, keep subsequent space and memory traits arguments + + typedef typename ViewTraits::execution_space execution_space ; + typedef typename ViewTraits::memory_space memory_space ; + typedef ArrayLayout array_layout ; + typedef typename ViewTraits::memory_traits memory_traits ; +}; + +template< class Space , class ... Prop > +struct ViewTraits< typename std::enable_if< Kokkos::Impl::is_space::value >::type , Space , Prop ... > +{ + // Specify Space, memory traits should be the only subsequent argument + + static_assert( std::is_same< typename ViewTraits::execution_space , void >::value || + std::is_same< typename ViewTraits::memory_space , void >::value || + std::is_same< typename ViewTraits::array_layout , void >::value + , "Only one View Execution or Memory Space template argument" ); + + typedef typename Space::execution_space execution_space ; + typedef typename Space::memory_space memory_space ; + typedef typename execution_space::array_layout array_layout ; + typedef typename ViewTraits::memory_traits memory_traits ; +}; + +template< class MemoryTraits , class ... Prop > +struct ViewTraits< typename std::enable_if< Kokkos::Impl::is_memory_traits::value >::type , MemoryTraits , Prop ... 
> +{ + // Specify memory trait, should not be any subsequent arguments + + static_assert( std::is_same< typename ViewTraits::execution_space , void >::value || + std::is_same< typename ViewTraits::memory_space , void >::value || + std::is_same< typename ViewTraits::array_layout , void >::value || + std::is_same< typename ViewTraits::memory_traits , void >::value + , "MemoryTrait is the final optional template argument for a View" ); + + typedef void execution_space ; + typedef void memory_space ; + typedef void array_layout ; + typedef MemoryTraits memory_traits ; +}; + + +template< class DataType , class ... Properties > +struct ViewTraits { private: - // Layout, Space, and MemoryTraits are optional - // but need to appear in that order. That means Layout - // can only be Arg1, Space can be Arg1 or Arg2, and - // MemoryTraits can be Arg1, Arg2 or Arg3 + // Unpack the properties arguments + typedef ViewTraits< void , Properties ... > prop ; - enum { Arg1IsLayout = Kokkos::Impl::is_array_layout::value }; + typedef typename + std::conditional< ! 
std::is_same< typename prop::execution_space , void >::value + , typename prop::execution_space + , Kokkos::DefaultExecutionSpace + >::type + ExecutionSpace ; - enum { Arg1IsSpace = Kokkos::Impl::is_space::value }; - enum { Arg2IsSpace = Kokkos::Impl::is_space::value }; - - enum { Arg1IsMemoryTraits = Kokkos::Impl::is_memory_traits::value }; - enum { Arg2IsMemoryTraits = Kokkos::Impl::is_memory_traits::value }; - enum { Arg3IsMemoryTraits = Kokkos::Impl::is_memory_traits::value }; - - enum { Arg1IsVoid = std::is_same< Arg1 , void >::value }; - enum { Arg2IsVoid = std::is_same< Arg2 , void >::value }; - enum { Arg3IsVoid = std::is_same< Arg3 , void >::value }; - - static_assert( 1 == Arg1IsLayout + Arg1IsSpace + Arg1IsMemoryTraits + Arg1IsVoid - , "Template argument #1 must be layout, space, traits, or void" ); - - // If Arg1 is Layout then Arg2 is Space, MemoryTraits, or void - // If Arg1 is Space then Arg2 is MemoryTraits or void - // If Arg1 is MemoryTraits then Arg2 is void - // If Arg1 is Void then Arg2 is void - - static_assert( ( Arg1IsLayout && ( 1 == Arg2IsSpace + Arg2IsMemoryTraits + Arg2IsVoid ) ) || - ( Arg1IsSpace && ( 0 == Arg2IsSpace ) && ( 1 == Arg2IsMemoryTraits + Arg2IsVoid ) ) || - ( Arg1IsMemoryTraits && Arg2IsVoid ) || - ( Arg1IsVoid && Arg2IsVoid ) - , "Template argument #2 must be space, traits, or void" ); - - // Arg3 is MemoryTraits or void and at most one argument is MemoryTraits - static_assert( ( 1 == Arg3IsMemoryTraits + Arg3IsVoid ) && - ( Arg1IsMemoryTraits + Arg2IsMemoryTraits + Arg3IsMemoryTraits <= 1 ) - , "Template argument #3 must be traits or void" ); - - typedef - typename std::conditional< Arg1IsSpace , Arg1 , - typename std::conditional< Arg2IsSpace , Arg2 , Kokkos::DefaultExecutionSpace - >::type >::type::execution_space - ExecutionSpace ; - - typedef - typename std::conditional< Arg1IsSpace , Arg1 , - typename std::conditional< Arg2IsSpace , Arg2 , Kokkos::DefaultExecutionSpace - >::type >::type::memory_space + typedef 
typename + std::conditional< ! std::is_same< typename prop::memory_space , void >::value + , typename prop::memory_space + , typename ExecutionSpace::memory_space + >::type MemorySpace ; - typedef - typename Kokkos::Impl::is_space< - typename std::conditional< Arg1IsSpace , Arg1 , - typename std::conditional< Arg2IsSpace , Arg2 , Kokkos::DefaultExecutionSpace - >::type >::type >::host_mirror_space - HostMirrorSpace ; - - typedef - typename std::conditional< Arg1IsLayout , Arg1 , typename ExecutionSpace::array_layout >::type + typedef typename + std::conditional< ! std::is_same< typename prop::array_layout , void >::value + , typename prop::array_layout + , typename ExecutionSpace::array_layout + >::type ArrayLayout ; - // Arg1, Arg2, or Arg3 may be memory traits - typedef - typename std::conditional< Arg1IsMemoryTraits , Arg1 , - typename std::conditional< Arg2IsMemoryTraits , Arg2 , - typename std::conditional< Arg3IsMemoryTraits , Arg3 , MemoryManaged - >::type >::type >::type + typedef typename Kokkos::Impl::is_space< ExecutionSpace >::host_mirror_space + HostMirrorSpace ; + + typedef typename + std::conditional< ! 
std::is_same< typename prop::memory_traits , void >::value + , typename prop::memory_traits + , typename Kokkos::MemoryManaged + >::type MemoryTraits ; - // Analyze data type's array properties - typedef Kokkos::Experimental::Impl::ViewArrayAnalysis< DataType > array_analysis ; - - // Analyze data type's properties with opportunity to specialize based upon the array value type - typedef Kokkos::Experimental::Impl:: - ViewDataAnalysis< DataType - , typename array_analysis::non_const_value_type - , ArrayLayout - > data_analysis ; + // Analyze data type's properties, + // May be specialized based upon the layout and value type + typedef Kokkos::Experimental::Impl::ViewDataAnalysis< DataType , ArrayLayout > data_analysis ; public: @@ -220,17 +264,17 @@ public: //------------------------------------ // Execution space, memory space, memory access traits, and host mirror space. - typedef ExecutionSpace execution_space ; - typedef MemorySpace memory_space ; - typedef Device device_type ; - typedef MemoryTraits memory_traits ; - typedef HostMirrorSpace host_mirror_space ; + typedef ExecutionSpace execution_space ; + typedef MemorySpace memory_space ; + typedef Kokkos::Device device_type ; + typedef MemoryTraits memory_traits ; + typedef HostMirrorSpace host_mirror_space ; - typedef typename memory_space::size_type size_type ; + typedef typename MemorySpace::size_type size_type ; - enum { is_hostspace = std::is_same< memory_space , HostSpace >::value }; - enum { is_managed = memory_traits::Unmanaged == 0 }; - enum { is_random_access = memory_traits::RandomAccess == 1 }; + enum { is_hostspace = std::is_same< MemorySpace , HostSpace >::value }; + enum { is_managed = MemoryTraits::Unmanaged == 0 }; + enum { is_random_access = MemoryTraits::RandomAccess == 1 }; //------------------------------------ }; @@ -254,11 +298,13 @@ public: * they may occur. 
* * Valid ways in which template arguments may be specified: - * - View< DataType , Space > - * - View< DataType , Space , MemoryTraits > - * - View< DataType , Space , void , MemoryTraits > + * - View< DataType > + * - View< DataType , Layout > * - View< DataType , Layout , Space > * - View< DataType , Layout , Space , MemoryTraits > + * - View< DataType , Space > + * - View< DataType , Space , MemoryTraits > + * - View< DataType , MemoryTraits > * * \tparam DataType (required) This indicates both the type of each * entry of the array, and the combination of compile-time and @@ -315,10 +361,7 @@ public: * } * \endcode */ -template< class DataType - , class Arg1 = void /* ArrayLayout, SpaceType, or MemoryTraits */ - , class Arg2 = void /* SpaceType or MemoryTraits */ - , class Arg3 = void /* MemoryTraits */ > +template< class DataType , class ... Properties > class View ; } /* namespace Experimental */ @@ -376,21 +419,24 @@ view_alloc( Args ... args ) namespace Kokkos { namespace Experimental { -/**\brief Each R? template argument designates whether the subview argument is a range */ -template< class V - , bool R0 = false , bool R1 = false , bool R2 = false , bool R3 = false - , bool R4 = false , bool R5 = false , bool R6 = false , bool R7 = false > -using Subview = typename Kokkos::Experimental::Impl::SubviewType< V, R0 , R1 , R2 , R3 , R4 , R5 , R6 , R7 >::type ; +template< class DataType , class ... Properties > +class View ; -template< class DataType , class Arg1 , class Arg2 , class Arg3 > -class View : public ViewTraits< DataType , Arg1 , Arg2 , Arg3 > { +template< class > struct is_view : public std::false_type {}; + +template< class D, class ... P > +struct is_view< View > : public std::true_type {}; + +template< class DataType , class ... Properties > +class View : public ViewTraits< DataType , Properties ... > { private: - template< class , class , class , class > friend class View ; + template< class , class ... 
> friend class View ; + template< class , class ... > friend class Impl::ViewMapping ; - typedef ViewTraits< DataType , Arg1 , Arg2 , Arg3 > traits ; - typedef Kokkos::Experimental::Impl::ViewMapping< traits > map_type ; - typedef Kokkos::Experimental::Impl::SharedAllocationTracker track_type ; + typedef ViewTraits< DataType , Properties ... > traits ; + typedef Kokkos::Experimental::Impl::ViewMapping< traits , void > map_type ; + typedef Kokkos::Experimental::Impl::SharedAllocationTracker track_type ; track_type m_track ; map_type m_map ; @@ -414,16 +460,15 @@ public: /** \brief Compatible view of non-const data type */ typedef View< typename traits::non_const_data_type , - typename traits::array_layout , - typename traits::device_type , - typename traits::memory_traits > + typename traits::array_layout , + typename traits::device_type , + typename traits::memory_traits > non_const_type ; /** \brief Compatible HostMirror view */ typedef View< typename traits::non_const_data_type , typename traits::array_layout , - typename traits::host_mirror_space , - void > + typename traits::host_mirror_space > HostMirror ; //---------------------------------------- @@ -431,6 +476,21 @@ public: enum { Rank = map_type::Rank }; + template< typename iType > + KOKKOS_INLINE_FUNCTION constexpr + typename std::enable_if< std::is_integral::value , size_t >::type + extent( const iType & r ) const + { + return r == 0 ? m_map.dimension_0() : ( + r == 1 ? m_map.dimension_1() : ( + r == 2 ? m_map.dimension_2() : ( + r == 3 ? m_map.dimension_3() : ( + r == 4 ? m_map.dimension_4() : ( + r == 5 ? m_map.dimension_5() : ( + r == 6 ? m_map.dimension_6() : ( + r == 7 ? 
m_map.dimension_7() : 1 ))))))); + } + KOKKOS_INLINE_FUNCTION constexpr size_t dimension_0() const { return m_map.dimension_0(); } KOKKOS_INLINE_FUNCTION constexpr size_t dimension_1() const { return m_map.dimension_1(); } KOKKOS_INLINE_FUNCTION constexpr size_t dimension_2() const { return m_map.dimension_2(); } @@ -440,6 +500,15 @@ public: KOKKOS_INLINE_FUNCTION constexpr size_t dimension_6() const { return m_map.dimension_6(); } KOKKOS_INLINE_FUNCTION constexpr size_t dimension_7() const { return m_map.dimension_7(); } + KOKKOS_INLINE_FUNCTION constexpr size_t size() const { return m_map.dimension_0() * + m_map.dimension_1() * + m_map.dimension_2() * + m_map.dimension_3() * + m_map.dimension_4() * + m_map.dimension_5() * + m_map.dimension_6() * + m_map.dimension_7(); } + KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const { return m_map.stride_0(); } KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const { return m_map.stride_1(); } KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const { return m_map.stride_2(); } @@ -449,21 +518,27 @@ public: KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const { return m_map.stride_6(); } KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const { return m_map.stride_7(); } + template< typename iType > + KOKKOS_INLINE_FUNCTION void stride( iType * const s ) const { m_map.stride(s); } + //---------------------------------------- // Range span is the span which contains all members. 
typedef typename map_type::reference_type reference_type ; + typedef typename map_type::pointer_type pointer_type ; enum { reference_type_is_lvalue_reference = std::is_lvalue_reference< reference_type >::value }; KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); } + // Deprecated, use 'span()' instead + KOKKOS_INLINE_FUNCTION constexpr size_t capacity() const { return m_map.span(); } KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const { return m_map.span_is_contiguous(); } - KOKKOS_INLINE_FUNCTION constexpr typename traits::value_type * data() const { return m_map.data(); } + KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const { return m_map.data(); } // Deprecated, use 'span_is_contigous()' instead KOKKOS_INLINE_FUNCTION constexpr bool is_contiguous() const { return m_map.span_is_contiguous(); } // Deprecated, use 'data()' instead - KOKKOS_INLINE_FUNCTION constexpr typename traits::value_type * ptr_on_device() const { return m_map.data(); } + KOKKOS_INLINE_FUNCTION constexpr pointer_type ptr_on_device() const { return m_map.data(); } //---------------------------------------- // Allow specializations to query their specialized map @@ -485,14 +560,81 @@ private: , Kokkos::Experimental::Impl::Error_view_scalar_reference_to_non_scalar_view >::type scalar_operator_index_type ; + enum { is_default_map = + std::is_same< typename traits::specialize , void >::value && + ( std::is_same< typename traits::array_layout , Kokkos::LayoutLeft >::value || + std::is_same< typename traits::array_layout , Kokkos::LayoutRight >::value || + std::is_same< typename traits::array_layout , Kokkos::LayoutStride >::value + ) }; + + template < bool F , unsigned R + , typename I0 = int + , typename I1 = int + , typename I2 = int + , typename I3 = int + , typename I4 = int + , typename I5 = int + , typename I6 = int + , typename I7 = int > + struct enable { + enum { value = F && ( R == traits::rank ) && + std::is_integral::value && + 
std::is_integral::value && + std::is_integral::value && + std::is_integral::value && + std::is_integral::value && + std::is_integral::value && + std::is_integral::value && + std::is_integral::value }; + }; + + KOKKOS_INLINE_FUNCTION + void verify_operator_bounds( size_t i0 = 0 , size_t i1 = 0 , size_t i2 = 0 , size_t i3 = 0 + , size_t i4 = 0 , size_t i5 = 0 , size_t i6 = 0 , size_t i7 = 0 ) const + { + if ( ( m_map.dimension_0() <= i0 ) || + ( m_map.dimension_1() <= i1 ) || + ( m_map.dimension_2() <= i2 ) || + ( m_map.dimension_3() <= i3 ) || + ( m_map.dimension_4() <= i4 ) || + ( m_map.dimension_5() <= i5 ) || + ( m_map.dimension_6() <= i6 ) || + ( m_map.dimension_7() <= i7 ) ) { + Kokkos::Experimental::Impl:: + ViewOperatorBoundsErrorAbort< Kokkos::Impl::ActiveExecutionMemorySpace >:: + apply( Rank + , m_map.dimension_0() , m_map.dimension_1() + , m_map.dimension_2() , m_map.dimension_3() + , m_map.dimension_4() , m_map.dimension_5() + , m_map.dimension_6() , m_map.dimension_7() + , i0 , i1 , i2 , i3 , i4 , i5 , i6 , i7 ); + } + } + +#if defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK ) + +#define KOKKOS_VIEW_OPERATOR_VERIFY( I0 , I1 , I2 , I3 , I4 , I5 , I6 , I7 ) \ + Kokkos::Impl::VerifyExecutionCanAccessMemorySpace \ + < Kokkos::Impl::ActiveExecutionMemorySpace , typename traits::memory_space >::verify(); \ + verify_operator_bounds(I0,I1,I2,I3,I4,I5,I6,I7); + +#else + +#define KOKKOS_VIEW_OPERATOR_VERIFY( I0 , I1 , I2 , I3 , I4 , I5 , I6 , I7 ) \ + Kokkos::Impl::VerifyExecutionCanAccessMemorySpace \ + < Kokkos::Impl::ActiveExecutionMemorySpace , typename traits::memory_space >::verify(); + +#endif + public: + //------------------------------ // Rank == 0 KOKKOS_FORCEINLINE_FUNCTION scalar_operator_reference_type operator()() const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, 0, 0, 0, 0, 0, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(0,0,0,0,0,0,0,0) return scalar_operator_reference_type( m_map.reference() ); } @@ -502,275 +644,299 
@@ public: , const int i1 = 0 , const int i2 = 0 , const int i3 = 0 , const int i4 = 0 , const int i5 = 0 , const int i6 = 0 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); - return m_map.reference(); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) + return scalar_operator_reference_type( m_map.reference() ); } + //------------------------------ // Rank == 1 template< typename I0 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 1 && std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 1 , I0 >::value , reference_type >::type operator[]( const I0 & i0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, 0, 0, 0, 0, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,0,0,0,0,0,0,0) + return m_map.m_handle[ m_map.m_offset(i0) ]; + } + + template< typename I0 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! is_default_map , 1 , I0 >::value , reference_type >::type + operator[]( const I0 & i0 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,0,0,0,0,0,0,0) return m_map.reference(i0); } template< typename I0 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 1 && std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 1 , I0 >::value , reference_type >::type operator()( const I0 & i0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, 0, 0, 0, 0, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,0,0,0,0,0,0,0) + return m_map.m_handle[ m_map.m_offset(i0) ]; + } + + template< typename I0 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! 
is_default_map , 1 , I0 >::value , reference_type >::type + operator()( const I0 & i0 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,0,0,0,0,0,0,0) return m_map.reference(i0); } template< typename I0 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 1 , I0 >::value , reference_type >::type operator()( const I0 & i0 - , typename std::enable_if<( Rank == 1 && std::is_integral::value ), const int >::type i1 - , const int i2 = 0 , const int i3 = 0 + , const int i1 , const int i2 = 0 , const int i3 = 0 , const int i4 = 0 , const int i5 = 0 , const int i6 = 0 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0); } + //------------------------------ // Rank == 2 template< typename I0 , typename I1 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 2 && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 2 , I0 , I1 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, 0, 0, 0, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,0,0,0,0,0,0) + return m_map.m_handle[ m_map.m_offset(i0,i1) ]; + } + + template< typename I0 , typename I1 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! 
is_default_map , 2 , I0 , I1 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,0,0,0,0,0,0) return m_map.reference(i0,i1); } template< typename I0 , typename I1 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 2 , I0 , I1 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 - , typename std::enable_if<( Rank == 2 && - std::is_integral::value && - std::is_integral::value - ), const int >::type i2 - , const int i3 = 0 + , const int i2 , const int i3 = 0 , const int i4 = 0 , const int i5 = 0 , const int i6 = 0 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1); } + //------------------------------ // Rank == 3 template< typename I0 , typename I1 , typename I2 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 3 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 3 , I0 , I1 , I2 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, 0, 0, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,0,0,0,0,0) + return m_map.m_handle[ m_map.m_offset(i0,i1,i2) ]; + } + + template< typename I0 , typename I1 , typename I2 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! 
is_default_map , 3 , I0 , I1 , I2 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,0,0,0,0,0) return m_map.reference(i0,i1,i2); } template< typename I0 , typename I1 , typename I2 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 3 , I0 , I1 , I2 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 - , typename std::enable_if<( Rank == 3 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), const int >::type i3 + , const int i3 , const int i4 = 0 , const int i5 = 0 , const int i6 = 0 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1,i2); } + //------------------------------ // Rank == 4 template< typename I0 , typename I1 , typename I2 , typename I3 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 4 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 4 , I0 , I1 , I2 , I3 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, 0, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,0,0,0,0) + return m_map.m_handle[ m_map.m_offset(i0,i1,i2,i3) ]; + } + + template< typename I0 , typename I1 , typename I2 , typename I3 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! 
is_default_map , 4 , I0 , I1 , I2 , I3 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,0,0,0,0) return m_map.reference(i0,i1,i2,i3); } template< typename I0 , typename I1 , typename I2 , typename I3 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 4 , I0 , I1 , I2 , I3 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 - , typename std::enable_if<( Rank == 4 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), const int >::type i4 + , const int i4 , const int i5 = 0 , const int i6 = 0 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1,i2,i3); } + //------------------------------ // Rank == 5 template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 5 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 5 , I0 , I1 , I2 , I3 , I4 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, 0, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,0,0,0) + return m_map.m_handle[ m_map.m_offset(i0,i1,i2,i3,i4) ]; + } + + template< typename I0 , typename I1 , typename I2 , typename I3 + , typename I4 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! 
is_default_map , 5 , I0 , I1 , I2 , I3 , I4 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 + , const I4 & i4 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,0,0,0) return m_map.reference(i0,i1,i2,i3,i4); } template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 5 , I0 , I1 , I2 , I3 , I4 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 - , typename std::enable_if<( Rank == 5 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), const int >::type i5 + , const int i5 , const int i6 = 0 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1,i2,i3,i4); } + //------------------------------ // Rank == 6 template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 , typename I5 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 6 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 6 , I0 , I1 , I2 , I3 , I4 , I5 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 , const I5 & i5 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, 0, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,0,0) + return m_map.m_handle[ m_map.m_offset(i0,i1,i2,i3,i4,i5) ]; + } + + template< typename I0 , 
typename I1 , typename I2 , typename I3 + , typename I4 , typename I5 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! is_default_map , 6 , I0 , I1 , I2 , I3 , I4 , I5 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 + , const I4 & i4 , const I5 & i5 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,0,0) return m_map.reference(i0,i1,i2,i3,i4,i5); } template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 , typename I5 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 6 , I0 , I1 , I2 , I3 , I4 , I5 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 , const I5 & i5 - , typename std::enable_if<( Rank == 6 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), const int >::type i6 + , const int i6 , const int i7 = 0 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1,i2,i3,i4,i5); } + //------------------------------ // Rank == 7 template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 , typename I5 , typename I6 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 7 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 7 , I0 , I1 , I2 , I3 , I4 , I5 , I6 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 , const I5 & i5 , const I6 & i6 ) const { - 
KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, 0 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,0) + return m_map.m_handle[ m_map.m_offset(i0,i1,i2,i3,i4,i5,i6) ]; + } + + template< typename I0 , typename I1 , typename I2 , typename I3 + , typename I4 , typename I5 , typename I6 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! is_default_map , 7 , I0 , I1 , I2 , I3 , I4 , I5 , I6 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 + , const I4 & i4 , const I5 & i5 , const I6 & i6 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,0) return m_map.reference(i0,i1,i2,i3,i4,i5,i6); } template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 , typename I5 , typename I6 > KOKKOS_FORCEINLINE_FUNCTION - reference_type + typename std::enable_if< View::enable< true , 7 , I0 , I1 , I2 , I3 , I4 , I5 , I6 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 , const I5 & i5 , const I6 & i6 - , typename std::enable_if<( Rank == 7 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), const int >::type i7 + , const int i7 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1,i2,i3,i4,i5,i6); } + //------------------------------ // Rank == 8 template< typename I0 , typename I1 , typename I2 , typename I3 , typename I4 , typename I5 , typename I6 , typename I7 > KOKKOS_FORCEINLINE_FUNCTION - typename std::enable_if<( Rank == 8 && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - std::is_integral::value && - 
std::is_integral::value && - std::is_integral::value && - std::is_integral::value - ), reference_type >::type + typename std::enable_if< View::enable< is_default_map , 8 , I0 , I1 , I2 , I3 , I4 , I5 , I6 , I7 >::value , reference_type >::type operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 , const I4 & i4 , const I5 & i5 , const I6 & i6 , const I7 & i7 ) const { - KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( typename traits::memory_space, m_map, Rank, i0, i1, i2, i3, i4, i5, i6, i7 ); + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) + return m_map.m_handle[ m_map.m_offset(i0,i1,i2,i3,i4,i5,i6,i7) ]; + } + + template< typename I0 , typename I1 , typename I2 , typename I3 + , typename I4 , typename I5 , typename I6 , typename I7 > + KOKKOS_FORCEINLINE_FUNCTION + typename std::enable_if< View::enable< ! is_default_map , 8 , I0 , I1 , I2 , I3 , I4 , I5 , I6 , I7 >::value , reference_type >::type + operator()( const I0 & i0 , const I1 & i1 , const I2 & i2 , const I3 & i3 + , const I4 & i4 , const I5 & i5 , const I6 & i6 , const I7 & i7 ) const + { + KOKKOS_VIEW_OPERATOR_VERIFY(i0,i1,i2,i3,i4,i5,i6,i7) return m_map.reference(i0,i1,i2,i3,i4,i5,i6,i7); } +#undef KOKKOS_VIEW_OPERATOR_VERIFY + //---------------------------------------- + // Standard destructor, constructors, and assignment operators KOKKOS_INLINE_FUNCTION ~View() {} @@ -791,53 +957,57 @@ public: View & operator = ( View && rhs ) { m_track = rhs.m_track ; m_map = rhs.m_map ; return *this ; } //---------------------------------------- + // Compatible view copy constructor and assignment + // may assign unmanaged from managed. - template< class RT , class R1 , class R2 , class R3 > + template< class RT , class ... 
RP > KOKKOS_INLINE_FUNCTION - View( const View & rhs ) - : m_track( rhs.m_track ) + View( const View & rhs ) + : m_track( rhs.m_track , traits::is_managed ) , m_map() { - typedef typename View::traits SrcTraits ; - typedef Kokkos::Experimental::Impl::ViewMapping< traits , SrcTraits > Mapping ; + typedef typename View::traits SrcTraits ; + typedef Kokkos::Experimental::Impl::ViewMapping< traits , SrcTraits , void > Mapping ; static_assert( Mapping::is_assignable , "Incompatible View copy construction" ); Mapping::assign( m_map , rhs.m_map , rhs.m_track ); } - template< class RT , class R1 , class R2 , class R3 > + template< class RT , class ... RP > KOKKOS_INLINE_FUNCTION - View( View && rhs ) - : m_track( rhs.m_track ) - , m_map() + View & operator = ( const View & rhs ) { - typedef typename View::traits SrcTraits ; - typedef Kokkos::Experimental::Impl::ViewMapping< traits , SrcTraits > Mapping ; - static_assert( Mapping::is_assignable , "Incompatible View move construction" ); - Mapping::assign( m_map , rhs.m_map , rhs.m_track ); - } - - template< class RT , class R1 , class R2 , class R3 > - KOKKOS_INLINE_FUNCTION - View & operator = ( const View & rhs ) - { - typedef typename View::traits SrcTraits ; - typedef Kokkos::Experimental::Impl::ViewMapping< traits , SrcTraits > Mapping ; + typedef typename View::traits SrcTraits ; + typedef Kokkos::Experimental::Impl::ViewMapping< traits , SrcTraits , void > Mapping ; static_assert( Mapping::is_assignable , "Incompatible View copy assignment" ); Mapping::assign( m_map , rhs.m_map , rhs.m_track ); - m_track.operator=( rhs.m_track ); + m_track.assign( rhs.m_track , traits::is_managed ); return *this ; } - template< class RT , class R1 , class R2 , class R3 > + //---------------------------------------- + // Compatible subview constructor + // may assign unmanaged from managed. + + template< class RT , class ... RP , class Arg0 , class ... 
Args > KOKKOS_INLINE_FUNCTION - View & operator = ( View && rhs ) + View( const View< RT , RP... > & src_view + , const Arg0 & arg0 , Args ... args ) + : m_track( src_view.m_track , traits::is_managed ) + , m_map() { - typedef typename View::traits SrcTraits ; - typedef Kokkos::Experimental::Impl::ViewMapping< traits , SrcTraits > Mapping ; - static_assert( Mapping::is_assignable , "Incompatible View move assignment" ); - Mapping::assign( m_map , rhs.m_map , rhs.m_track ); - m_track.operator=( rhs.m_track ); - return *this ; + typedef View< RT , RP... > SrcType ; + + typedef Kokkos::Experimental::Impl::ViewMapping + < void /* deduce destination view type from source view traits */ + , typename SrcType::traits + , Arg0 , Args... > Mapping ; + + typedef typename Mapping::type DstType ; + + static_assert( Kokkos::Experimental::Impl::ViewMapping< View , DstType , void >::is_assignable + , "Subview construction requires compatible view and subview arguments" ); + + Mapping::assign( m_map, src_view.m_map, arg0 , args... ); } //---------------------------------------- @@ -851,19 +1021,23 @@ private: map_type m_map ; ExecSpace m_space ; - KOKKOS_INLINE_FUNCTION void destroy_shared_allocation() { m_map.destroy( m_space ); } }; public: + KOKKOS_INLINE_FUNCTION + int use_count() const { return m_track.use_count(); } + inline const std::string label() const { return m_track.template get_label< typename traits::memory_space >(); } + // Disambiguate from subview constructor. template< class Prop > explicit inline View( const Prop & arg_prop - , const size_t arg_N0 = 0 + , typename std::enable_if< ! is_view::value , + const size_t >::type arg_N0 = 0 , const size_t arg_N1 = 0 , const size_t arg_N2 = 0 , const size_t arg_N3 = 0 @@ -902,21 +1076,23 @@ public: // Construct the mapping object prior to start of tracking // to assign destroy functor and possibly initialize. 
- m_map = map_type( record->data() + m_map = map_type( reinterpret_cast< pointer_type >( record->data() ) , prop.allow_padding , arg_N0 , arg_N1 , arg_N2 , arg_N3 , arg_N4 , arg_N5 , arg_N6 , arg_N7 ); - // Copy the destroy functor into the allocation record before initiating tracking. - record->m_destroy.m_map = m_map ; - record->m_destroy.m_space = prop.execution ; - + // If constructing the plan for destructing as well + // Copy the destroy functor into the allocation record + // before initiating tracking. if ( prop.initialize.value ) { m_map.construct( prop.execution ); + + record->m_destroy.m_map = m_map ; + record->m_destroy.m_space = prop.execution ; } - // Destroy functor assigned and initialization complete, start tracking - m_track = track_type( record ); + // Setup and initialization complete, start tracking + m_track.assign_allocated_record_to_uninitialized( record ); } template< class Prop > @@ -952,18 +1128,19 @@ public: // Construct the mapping object prior to start of tracking // to assign destroy functor and possibly initialize. - m_map = map_type( record->data() , prop.allow_padding , arg_layout ); + m_map = map_type( reinterpret_cast< pointer_type >( record->data() ) , prop.allow_padding , arg_layout ); // Copy the destroy functor into the allocation record before initiating tracking. 
- record->m_destroy.m_map = m_map ; - record->m_destroy.m_space = prop.execution ; if ( prop.initialize.value ) { m_map.construct( prop.execution ); + + record->m_destroy.m_map = m_map ; + record->m_destroy.m_space = prop.execution ; } - // Destroy functor assigned and initialization complete, start tracking - m_track = track_type( record ); + // Setup and initialization complete, start tracking + m_track.assign_allocated_record_to_uninitialized( record ); } //---------------------------------------- @@ -984,7 +1161,7 @@ public: } explicit inline - View( typename traits::value_type * const arg_ptr + View( pointer_type arg_ptr , const size_t arg_N0 = 0 , const size_t arg_N1 = 0 , const size_t arg_N2 = 0 @@ -1001,7 +1178,7 @@ public: {} explicit inline - View( typename traits::value_type * const arg_ptr + View( pointer_type arg_ptr , typename traits::array_layout & arg_layout ) : m_track() // No memory tracking @@ -1037,449 +1214,69 @@ public: , const size_t arg_N6 = 0 , const size_t arg_N7 = 0 ) : m_track() // No memory tracking - , m_map( arg_space.get_shmem( map_type::memory_span( std::integral_constant() - , arg_N0 , arg_N1 , arg_N2 , arg_N3 - , arg_N4 , arg_N5 , arg_N6 , arg_N7 ) ) - , std::integral_constant() - , arg_N0 , arg_N1 , arg_N2 , arg_N3 - , arg_N4 , arg_N5 , arg_N6 , arg_N7 ) + , m_map( reinterpret_cast( + arg_space.get_shmem( + map_type::memory_span( std::integral_constant() + , arg_N0 , arg_N1 , arg_N2 , arg_N3 + , arg_N4 , arg_N5 , arg_N6 , arg_N7 ) ) ) + , std::integral_constant() + , arg_N0 , arg_N1 , arg_N2 , arg_N3 + , arg_N4 , arg_N5 , arg_N6 , arg_N7 ) {} - - //---------------------------------------- - // Subviews - -private: - - /**\brief Private method to support extensibility of subview construction */ - KOKKOS_INLINE_FUNCTION - View( const track_type & arg_track , const map_type & arg_map ) - : m_track( arg_track ) - , m_map( arg_map ) - {} - - explicit KOKKOS_INLINE_FUNCTION - View( const track_type & rhs ) - : m_track( rhs ) - , 
m_map() - {} - -public: - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 , class T6 , class T7 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 , T5 const & arg5 , T6 const & arg6 , T7 const & arg7 - ); - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 , class T6 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 , T5 const & arg5 , T6 const & arg6 - ); - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , 
Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 , T5 const & arg5 - ); - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 - ); - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 , class T2 , class T3 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - ); - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 , class T2 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , 
Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 - ); - - template< class D , class A1 , class A2 , class A3 - , class T0 , class T1 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D , A1 , A2 , A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D , A1 , A2 , A3 > & src - , T0 const & arg0 , T1 const & arg1 - ); - - template< class D, class A1, class A2, class A3, class T0 > - friend - KOKKOS_INLINE_FUNCTION - Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > - subview( const View< D, A1, A2, A3 > & src , T0 const & arg0 ); - }; -template< class > struct is_view : public std::false_type {}; - -template< class D, class A1, class A2, class A3 > -struct is_view< View > : public std::true_type {}; - //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- -template< class D, class A1, class A2, class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 , class T6 , class T7 > +template< class V , class ... Args > +using Subview = + typename Kokkos::Experimental::Impl::ViewMapping + < void /* deduce subview type from source view traits */ + , V + , Args ... + >::type ; + +template< class D, class ... P , class ... 
Args > KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 , T5 const & arg5 , T6 const & arg6 , T7 const & arg7 - ) +typename Kokkos::Experimental::Impl::ViewMapping + < void /* deduce subview type from source view traits */ + , ViewTraits< D , P... > + , Args ... + >::type +subview( const View< D, P... > & src , Args ... args ) { - typedef View< D, A1, A2, A3 > SrcView ; + static_assert( View< D , P... 
>::Rank == sizeof...(Args) , + "subview requires one argument for each source View rank" ); - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 8 , "Subview of rank 8 View requires 8 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map, src.m_map, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 ); - - return dst ; + return typename + Kokkos::Experimental::Impl::ViewMapping + < void /* deduce subview type from source view traits */ + , ViewTraits< D , P ... > + , Args ... >::type( src , args ... ); } -template< class D, class A1, class A2, class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 , class T6 > +template< class MemoryTraits , class D, class ... P , class ... 
Args > KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 , T5 const & arg5 , T6 const & arg6 - ) +typename Kokkos::Experimental::Impl::ViewMapping + < void /* deduce subview type from source view traits */ + , ViewTraits< D , P... > + , Args ... + >::template apply< MemoryTraits >::type +subview( const View< D, P... > & src , Args ... args ) { - typedef View< D, A1, A2, A3 > SrcView ; + static_assert( View< D , P... >::Rank == sizeof...(Args) , + "subview requires one argument for each source View rank" ); - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 7 , "Subview of rank 7 View requires 7 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map, src.m_map, arg0, arg1, arg2, arg3, arg4, arg5, arg6, 0 ); - - return dst ; + return typename + Kokkos::Experimental::Impl::ViewMapping + < void /* deduce subview type from source view traits */ + , ViewTraits< D 
, P ... > + , Args ... > + ::template apply< MemoryTraits > + ::type( src , args ... ); } -template< class D, class A1, class A2, class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 > -KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 , T5 const & arg5 - ) -{ - typedef View< D, A1, A2, A3 > SrcView ; - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 6 , "Subview of rank 6 View requires 6 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map, src.m_map, arg0, arg1, arg2, arg3, arg4, arg5, 0, 0 ); - - return dst ; -} - -template< class D, class A1, class A2, class A3 - , class T0 , class T1 , class T2 , class T3 - , class T4 > -KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , 
Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - , T4 const & arg4 - ) -{ - typedef View< D, A1, A2, A3 > SrcView ; - - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 5 , "Subview of rank 5 View requires 5 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map, src.m_map, arg0, arg1, arg2, arg3, arg4, 0, 0, 0 ); - - return dst ; -} - -template< class D, class A1, class A2, class A3 - , class T0 , class T1 , class T2 , class T3 > -KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 , T3 const & arg3 - ) -{ - typedef View< D, A1, A2, A3 > SrcView ; - - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 4 , "Subview of rank 4 View requires 4 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( 
dst.m_map, src.m_map, arg0, arg1, arg2, arg3, 0, 0, 0, 0 ); - - return dst ; -} - -template< class D, class A1, class A2, class A3 - , class T0 , class T1 , class T2 > -KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 , T2 const & arg2 - ) -{ - typedef View< D, A1, A2, A3 > SrcView ; - - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 3 , "Subview of rank 3 View requires 3 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map, src.m_map, arg0, arg1, arg2, 0, 0, 0, 0, 0 ); - - return dst ; -} - -template< class D, class A1, class A2, class A3 - , class T0 , class T1 > -KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src - , T0 const & arg0 , T1 const & arg1 - ) -{ - typedef View< D, A1, A2, A3 > SrcView ; - - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 2 , "Subview of rank 2 View requires 2 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map, src.m_map, arg0, arg1, 0, 0, 0, 0, 0, 0 ); - - return dst ; -} - 
-template< class D, class A1, class A2, class A3, class T0 > -KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::Subview< View< D, A1, A2, A3 > - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > -subview( const View< D, A1, A2, A3 > & src , T0 const & arg0 ) -{ - typedef View< D, A1, A2, A3 > SrcView ; - - typedef Kokkos::Experimental::Impl::SubviewMapping - < typename SrcView::traits - , Kokkos::Experimental::Impl::ViewOffsetRange::is_range - > Mapping ; - - typedef typename Mapping::type DstView ; - - static_assert( SrcView::Rank == 1 , "Subview of rank 1 View requires 1 arguments" ); - - DstView dst( src.m_track ); - - Mapping::assign( dst.m_map , src.m_map , arg0, 0, 0, 0, 0, 0, 0, 0 ); - - return dst ; -} } /* namespace Experimental */ } /* namespace Kokkos */ @@ -1490,15 +1287,14 @@ subview( const View< D, A1, A2, A3 > & src , T0 const & arg0 ) namespace Kokkos { namespace Experimental { -template< class LT , class L1 , class L2 , class L3 - , class RT , class R1 , class R2 , class R3 > +template< class LT , class ... LP , class RT , class ... 
RP > KOKKOS_INLINE_FUNCTION -bool operator == ( const View & lhs , - const View & rhs ) +bool operator == ( const View & lhs , + const View & rhs ) { // Same data, layout, dimensions - typedef ViewTraits lhs_traits ; - typedef ViewTraits rhs_traits ; + typedef ViewTraits lhs_traits ; + typedef ViewTraits rhs_traits ; return std::is_same< typename lhs_traits::const_value_type , @@ -1507,7 +1303,7 @@ bool operator == ( const View & lhs , typename rhs_traits::array_layout >::value && std::is_same< typename lhs_traits::memory_space , typename rhs_traits::memory_space >::value && - lhs_traits::Rank == rhs_traits::Rank && + lhs_traits::rank == rhs_traits::rank && lhs.data() == rhs.data() && lhs.span() == rhs.span() && lhs.dimension_0() == rhs.dimension_0() && @@ -1520,11 +1316,10 @@ bool operator == ( const View & lhs , lhs.dimension_7() == rhs.dimension_7(); } -template< class LT , class L1 , class L2 , class L3 - , class RT , class R1 , class R2 , class R3 > +template< class LT , class ... LP , class RT , class ... RP > KOKKOS_INLINE_FUNCTION -bool operator != ( const View & lhs , - const View & rhs ) +bool operator != ( const View & lhs , + const View & rhs ) { return ! 
( operator==(lhs,rhs) ); } @@ -1535,6 +1330,37 @@ bool operator != ( const View & lhs , //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +namespace Kokkos { +namespace Impl { + +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + +inline +void shared_allocation_tracking_claim_and_disable() +{ Kokkos::Experimental::Impl::SharedAllocationRecord::tracking_claim_and_disable(); } + +inline +void shared_allocation_tracking_release_and_enable() +{ Kokkos::Experimental::Impl::SharedAllocationRecord::tracking_release_and_enable(); } + +#else + +inline +void shared_allocation_tracking_claim_and_disable() +{ Kokkos::Impl::AllocationTracker::disable_tracking(); } + +inline +void shared_allocation_tracking_release_and_enable() +{ Kokkos::Impl::AllocationTracker::enable_tracking(); } + +#endif + +} /* namespace Impl */ +} /* namespace Kokkos */ + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + namespace Kokkos { namespace Experimental { namespace Impl { @@ -1575,7 +1401,9 @@ struct ViewFill { typedef typename OutputView::execution_space execution_space ; typedef Kokkos::RangePolicy< execution_space > Policy ; - (void) Kokkos::Impl::ParallelFor< ViewFill , Policy >( *this , Policy( 0 , output.dimension_0() ) ); + const Kokkos::Impl::ParallelFor< ViewFill , Policy > closure( *this , Policy( 0 , output.dimension_0() ) ); + + closure.execute(); execution_space::fence(); } @@ -1617,7 +1445,8 @@ struct ViewRemap { { typedef typename OutputView::execution_space execution_space ; typedef Kokkos::RangePolicy< execution_space > Policy ; - (void) Kokkos::Impl::ParallelFor< ViewRemap , Policy >( *this , Policy( 0 , n0 ) ); + const Kokkos::Impl::ParallelFor< ViewRemap , Policy > closure( *this , Policy( 0 , n0 ) ); + closure.execute(); } KOKKOS_INLINE_FUNCTION @@ -1646,49 
+1475,62 @@ namespace Kokkos { namespace Experimental { /** \brief Deep copy a value from Host memory into a view. */ -template< class DT , class D1 , class D2 , class D3 > +template< class DT , class ... DP > inline -void deep_copy( const View & dst - , typename ViewTraits::const_value_type & value ) +void deep_copy + ( const View & dst + , typename ViewTraits::const_value_type & value + , typename std::enable_if< + std::is_same< typename ViewTraits::specialize , void >::value + >::type * = 0 ) { - static_assert( std::is_same< typename ViewTraits::non_const_value_type , - typename ViewTraits::value_type >::value - , "ERROR: Incompatible deep_copy( View , value )" ); + static_assert( + std::is_same< typename ViewTraits::non_const_value_type , + typename ViewTraits::value_type >::value + , "deep_copy requires non-const type" ); - Kokkos::Experimental::Impl::ViewFill< View >( dst , value ); + Kokkos::Experimental::Impl::ViewFill< View >( dst , value ); } /** \brief Deep copy into a value in Host memory from a view. */ -template< class ST , class S1 , class S2 , class S3 > +template< class ST , class ... SP > inline -void deep_copy( ST & dst , const View & src ) +void deep_copy + ( ST & dst + , const View & src + , typename std::enable_if< + std::is_same< typename ViewTraits::specialize , void >::value + >::type * = 0 ) { - static_assert( ViewTraits::rank == 0 + static_assert( ViewTraits::rank == 0 , "ERROR: Non-rank-zero view in deep_copy( value , View )" ); - typedef ViewTraits src_traits ; + typedef ViewTraits src_traits ; typedef typename src_traits::memory_space src_memory_space ; Kokkos::Impl::DeepCopy< HostSpace , src_memory_space >( & dst , src.data() , sizeof(ST) ); } //---------------------------------------------------------------------------- /** \brief A deep copy between views of compatible type, and rank zero. */ -template< class DT , class D1 , class D2 , class D3 - , class ST , class S1 , class S2 , class S3 > +template< class DT , class ... 
DP , class ST , class ... SP > inline -void deep_copy( const View & dst , - const View & src , - typename std::enable_if<( - // Rank zero: - ( unsigned(ViewTraits::rank) == unsigned(0) ) && - ( unsigned(ViewTraits::rank) == unsigned(0) ) && - // Same type and destination is not constant: - std::is_same< typename ViewTraits::value_type , - typename ViewTraits::non_const_value_type >::value - )>::type * = 0 ) +void deep_copy + ( const View & dst + , const View & src + , typename std::enable_if<( + std::is_same< typename ViewTraits::specialize , void >::value && + std::is_same< typename ViewTraits::specialize , void >::value && + ( unsigned(ViewTraits::rank) == unsigned(0) && + unsigned(ViewTraits::rank) == unsigned(0) ) + )>::type * = 0 ) { - typedef View dst_type ; - typedef View src_type ; + static_assert( + std::is_same< typename ViewTraits::value_type , + typename ViewTraits::non_const_value_type >::value + , "deep_copy requires matching non-const destination type" ); + + typedef View dst_type ; + typedef View src_type ; typedef typename dst_type::value_type value_type ; typedef typename dst_type::memory_space dst_memory_space ; @@ -1703,30 +1545,30 @@ void deep_copy( const View & dst , /** \brief A deep copy between views of the default specialization, compatible type, * same non-zero rank, same contiguous layout. */ -template< class DT , class D1 , class D2 , class D3 , - class ST , class S1 , class S2 , class S3 > +template< class DT , class ... DP , class ST , class ... SP > inline -void deep_copy( const View & dst , - const View & src , - typename std::enable_if<( - // destination is non-const. 
- std::is_same< typename ViewTraits::value_type , - typename ViewTraits::non_const_value_type >::value - && - // Same non-zero rank: - ( unsigned(ViewTraits::rank) != 0 ) - && - ( unsigned(ViewTraits::rank) == - unsigned(ViewTraits::rank) ) - && - // Not specialized, default ViewMapping - std::is_same< typename ViewTraits::specialize , void >::value - && - std::is_same< typename ViewTraits::specialize , void >::value - )>::type * = 0 ) +void deep_copy + ( const View & dst + , const View & src + , typename std::enable_if<( + std::is_same< typename ViewTraits::specialize , void >::value && + std::is_same< typename ViewTraits::specialize , void >::value && + ( unsigned(ViewTraits::rank) != 0 || + unsigned(ViewTraits::rank) != 0 ) + )>::type * = 0 ) { - typedef View dst_type ; - typedef View src_type ; + static_assert( + std::is_same< typename ViewTraits::value_type , + typename ViewTraits::non_const_value_type >::value + , "deep_copy requires non-const destination type" ); + + static_assert( + ( unsigned(ViewTraits::rank) == + unsigned(ViewTraits::rank) ) + , "deep_copy requires Views of equal rank" ); + + typedef View dst_type ; + typedef View src_type ; typedef typename dst_type::execution_space dst_execution_space ; typedef typename dst_type::memory_space dst_memory_space ; @@ -1742,10 +1584,10 @@ void deep_copy( const View & dst , // If same type, equal layout, equal dimensions, equal span, and contiguous memory then can byte-wise copy - if ( std::is_same< typename ViewTraits::value_type , - typename ViewTraits::non_const_value_type >::value && - std::is_same< typename ViewTraits::array_layout , - typename ViewTraits::array_layout >::value && + if ( std::is_same< typename ViewTraits::value_type , + typename ViewTraits::non_const_value_type >::value && + std::is_same< typename ViewTraits::array_layout , + typename ViewTraits::array_layout >::value && dst.span_is_contiguous() && src.span_is_contiguous() && dst.span() == src.span() && @@ -1781,17 +1623,17 @@ void 
deep_copy( const View & dst , namespace Kokkos { namespace Experimental { -template< class T , class A1, class A2, class A3 > +template< class T , class ... P > inline -typename Kokkos::Experimental::View::HostMirror -create_mirror( const Kokkos::Experimental::View & src +typename Kokkos::Experimental::View::HostMirror +create_mirror( const Kokkos::Experimental::View & src , typename std::enable_if< - ! std::is_same< typename Kokkos::Experimental::ViewTraits::array_layout + ! std::is_same< typename Kokkos::Experimental::ViewTraits::array_layout , Kokkos::LayoutStride >::value >::type * = 0 ) { - typedef View src_type ; + typedef View src_type ; typedef typename src_type::HostMirror dst_type ; return dst_type( std::string( src.label() ).append("_mirror") @@ -1805,17 +1647,17 @@ create_mirror( const Kokkos::Experimental::View & src , src.dimension_7() ); } -template< class T , class A1, class A2, class A3 > +template< class T , class ... P > inline -typename Kokkos::Experimental::View::HostMirror -create_mirror( const Kokkos::Experimental::View & src +typename Kokkos::Experimental::View::HostMirror +create_mirror( const Kokkos::Experimental::View & src , typename std::enable_if< - std::is_same< typename Kokkos::Experimental::ViewTraits::array_layout + std::is_same< typename Kokkos::Experimental::ViewTraits::array_layout , Kokkos::LayoutStride >::value >::type * = 0 ) { - typedef View src_type ; + typedef View src_type ; typedef typename src_type::HostMirror dst_type ; Kokkos::LayoutStride layout ; @@ -1841,17 +1683,17 @@ create_mirror( const Kokkos::Experimental::View & src return dst_type( std::string( src.label() ).append("_mirror") , layout ); } -template< class T , class A1 , class A2 , class A3 > +template< class T , class ... 
P > inline -typename Kokkos::Experimental::View::HostMirror -create_mirror_view( const Kokkos::Experimental::View & src +typename Kokkos::Experimental::View::HostMirror +create_mirror_view( const Kokkos::Experimental::View & src , typename std::enable_if<( - std::is_same< typename Kokkos::Experimental::View::memory_space - , typename Kokkos::Experimental::View::HostMirror::memory_space + std::is_same< typename Kokkos::Experimental::View::memory_space + , typename Kokkos::Experimental::View::HostMirror::memory_space >::value && - std::is_same< typename Kokkos::Experimental::View::data_type - , typename Kokkos::Experimental::View::HostMirror::data_type + std::is_same< typename Kokkos::Experimental::View::data_type + , typename Kokkos::Experimental::View::HostMirror::data_type >::value )>::type * = 0 ) @@ -1859,17 +1701,17 @@ create_mirror_view( const Kokkos::Experimental::View & src return src ; } -template< class T , class A1 , class A2 , class A3 > +template< class T , class ... P > inline -typename Kokkos::Experimental::View::HostMirror -create_mirror_view( const Kokkos::Experimental::View & src +typename Kokkos::Experimental::View::HostMirror +create_mirror_view( const Kokkos::Experimental::View & src , typename std::enable_if< ! ( - std::is_same< typename Kokkos::Experimental::View::memory_space - , typename Kokkos::Experimental::View::HostMirror::memory_space + std::is_same< typename Kokkos::Experimental::View::memory_space + , typename Kokkos::Experimental::View::HostMirror::memory_space >::value && - std::is_same< typename Kokkos::Experimental::View::data_type - , typename Kokkos::Experimental::View::HostMirror::data_type + std::is_same< typename Kokkos::Experimental::View::data_type + , typename Kokkos::Experimental::View::HostMirror::data_type >::value )>::type * = 0 ) @@ -1887,9 +1729,9 @@ namespace Kokkos { namespace Experimental { /** \brief Resize a view with copying old data to new data at the corresponding indices. 
*/ -template< class T , class A1 , class A2 , class A3 > +template< class T , class ... P > inline -void resize( Kokkos::Experimental::View & v , +void resize( Kokkos::Experimental::View & v , const size_t n0 = 0 , const size_t n1 = 0 , const size_t n2 = 0 , @@ -1899,9 +1741,9 @@ void resize( Kokkos::Experimental::View & v , const size_t n6 = 0 , const size_t n7 = 0 ) { - typedef Kokkos::Experimental::View view_type ; + typedef Kokkos::Experimental::View view_type ; - static_assert( Kokkos::Experimental::ViewTraits::is_managed , "Can only resize managed views" ); + static_assert( Kokkos::Experimental::ViewTraits::is_managed , "Can only resize managed views" ); view_type v_resized( v.label(), n0, n1, n2, n3, n4, n5, n6, n7 ); @@ -1911,9 +1753,9 @@ void resize( Kokkos::Experimental::View & v , } /** \brief Resize a view with copying old data to new data at the corresponding indices. */ -template< class T , class A1 , class A2 , class A3 > +template< class T , class ... P > inline -void realloc( Kokkos::Experimental::View & v , +void realloc( Kokkos::Experimental::View & v , const size_t n0 = 0 , const size_t n1 = 0 , const size_t n2 = 0 , @@ -1923,9 +1765,9 @@ void realloc( Kokkos::Experimental::View & v , const size_t n6 = 0 , const size_t n7 = 0 ) { - typedef Kokkos::Experimental::View view_type ; + typedef Kokkos::Experimental::View view_type ; - static_assert( Kokkos::Experimental::ViewTraits::is_managed , "Can only realloc managed views" ); + static_assert( Kokkos::Experimental::ViewTraits::is_managed , "Can only realloc managed views" ); const std::string label = v.label(); @@ -1943,18 +1785,20 @@ void realloc( Kokkos::Experimental::View & v , namespace Kokkos { -template< class D , class A1 = void , class A2 = void , class A3 = void > -using ViewTraits = Kokkos::Experimental::ViewTraits ; +template< class D , class ... 
P > +using ViewTraits = Kokkos::Experimental::ViewTraits ; -template< class D , class A1 = void , class A2 = void , class A3 = void , class S = void > -using View = Kokkos::Experimental::View ; +template< class D , class ... P > +using View = Kokkos::Experimental::View ; +using Kokkos::Experimental::ALL ; using Kokkos::Experimental::deep_copy ; using Kokkos::Experimental::create_mirror ; using Kokkos::Experimental::create_mirror_view ; using Kokkos::Experimental::subview ; using Kokkos::Experimental::resize ; using Kokkos::Experimental::realloc ; +using Kokkos::Experimental::is_view ; namespace Impl { diff --git a/lib/kokkos/core/src/Kokkos_Array.hpp b/lib/kokkos/core/src/Kokkos_Array.hpp index 7fe8b1c394..80a388901a 100644 --- a/lib/kokkos/core/src/Kokkos_Array.hpp +++ b/lib/kokkos/core/src/Kokkos_Array.hpp @@ -45,6 +45,7 @@ #define KOKKOS_ARRAY #include +#include #include namespace Kokkos { diff --git a/lib/kokkos/core/src/Kokkos_Complex.hpp b/lib/kokkos/core/src/Kokkos_Complex.hpp new file mode 100644 index 0000000000..11aaf96177 --- /dev/null +++ b/lib/kokkos/core/src/Kokkos_Complex.hpp @@ -0,0 +1,529 @@ +/* +//@HEADER +// ************************************************************************ +// +// Kokkos v. 2.0 +// Copyright (2014) Sandia Corporation +// +// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +// the U.S. Government retains certain rights in this software. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the Corporation nor the names of the +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) +// +// ************************************************************************ +//@HEADER +*/ +#ifndef KOKKOS_COMPLEX_HPP +#define KOKKOS_COMPLEX_HPP + +#include +#include +#include + +namespace Kokkos { + +/// \class complex +/// \brief Partial reimplementation of std::complex that works as the +/// result of a Kokkos::parallel_reduce. +/// \tparam RealType The type of the real and imaginary parts of the +/// complex number. As with std::complex, this is only defined for +/// \c float, \c double, and long double. The latter is +/// currently forbidden in CUDA device kernels. +template +class complex { +private: + RealType re_, im_; + +public: + //! The type of the real or imaginary parts of this complex number. + typedef RealType value_type; + + //! Default constructor (initializes both real and imaginary parts to zero). + KOKKOS_INLINE_FUNCTION complex () : + re_ (0.0), im_ (0.0) + {} + + //! Copy constructor. 
+ KOKKOS_INLINE_FUNCTION complex (const complex& src) : + re_ (src.re_), im_ (src.im_) + {} + + //! Copy constructor from volatile. + KOKKOS_INLINE_FUNCTION complex (const volatile complex& src) : + re_ (src.re_), im_ (src.im_) + {} + + /// \brief Conversion constructor from std::complex. + /// + /// This constructor cannot be called in a CUDA device function, + /// because std::complex's methods and nonmember functions are not + /// marked as CUDA device functions. + template + complex (const std::complex& src) : + re_ (std::real (src)), im_ (std::imag (src)) + {} + + /// \brief Conversion operator to std::complex. + /// + /// This operator cannot be called in a CUDA device function, + /// because std::complex's methods and nonmember functions are not + /// marked as CUDA device functions. + operator std::complex () const { + return std::complex (re_, im_); + } + + /// \brief Constructor that takes just the real part, and sets the + /// imaginary part to zero. + template + KOKKOS_INLINE_FUNCTION complex (const InputRealType& val) : + re_ (val), im_ (0.0) + {} + + //! Constructor that takes the real and imaginary parts. + template + KOKKOS_INLINE_FUNCTION complex (const RealType1& re, const RealType2& im) : + re_ (re), im_ (im) + {} + + //! Assignment operator. + template + KOKKOS_INLINE_FUNCTION + complex& operator= (const complex& src) { + re_ = src.re_; + im_ = src.im_; + return *this; + } + + //! Assignment operator. + template + KOKKOS_INLINE_FUNCTION + volatile complex& operator= (const complex& src) volatile { + re_ = src.re_; + im_ = src.im_; + return *this; + } + + //! Assignment operator. + template + KOKKOS_INLINE_FUNCTION + volatile complex& operator= (const volatile complex& src) volatile { + re_ = src.re_; + im_ = src.im_; + return *this; + } + + //! Assignment operator. + template + KOKKOS_INLINE_FUNCTION + complex& operator= (const volatile complex& src) { + re_ = src.re_; + im_ = src.im_; + return *this; + } + + //! 
Assignment operator (from a real number). + template + KOKKOS_INLINE_FUNCTION + complex& operator= (const InputRealType& val) { + re_ = val; + im_ = static_cast (0.0); + return *this; + } + + //! Assignment operator (from a real number). + template + KOKKOS_INLINE_FUNCTION + void operator= (const InputRealType& val) volatile { + re_ = val; + im_ = static_cast (0.0); + } + + /// \brief Assignment operator from std::complex. + /// + /// This constructor cannot be called in a CUDA device function, + /// because std::complex's methods and nonmember functions are not + /// marked as CUDA device functions. + template + complex& operator= (const std::complex& src) { + re_ = std::real (src); + im_ = std::imag (src); + return *this; + } + + //! The imaginary part of this complex number. + KOKKOS_INLINE_FUNCTION RealType& imag () { + return im_; + } + + //! The real part of this complex number. + KOKKOS_INLINE_FUNCTION RealType& real () { + return re_; + } + + //! The imaginary part of this complex number. + KOKKOS_INLINE_FUNCTION const RealType imag () const { + return im_; + } + + //! The real part of this complex number. + KOKKOS_INLINE_FUNCTION const RealType real () const { + return re_; + } + + //! The imaginary part of this complex number (volatile overload). + KOKKOS_INLINE_FUNCTION volatile RealType& imag () volatile { + return im_; + } + + //! The real part of this complex number (volatile overload). + KOKKOS_INLINE_FUNCTION volatile RealType& real () volatile { + return re_; + } + + //! The imaginary part of this complex number (volatile overload). + KOKKOS_INLINE_FUNCTION const RealType imag () const volatile { + return im_; + } + + //! The real part of this complex number (volatile overload). 
+ KOKKOS_INLINE_FUNCTION const RealType real () const volatile { + return re_; + } + + KOKKOS_INLINE_FUNCTION + complex& operator += (const complex& src) { + re_ += src.re_; + im_ += src.im_; + return *this; + } + + KOKKOS_INLINE_FUNCTION + void operator += (const volatile complex& src) volatile { + re_ += src.re_; + im_ += src.im_; + } + + KOKKOS_INLINE_FUNCTION + complex& operator += (const RealType& src) { + re_ += src; + return *this; + } + + KOKKOS_INLINE_FUNCTION + void operator += (const volatile RealType& src) volatile { + re_ += src; + } + + KOKKOS_INLINE_FUNCTION + complex& operator -= (const complex& src) { + re_ -= src.re_; + im_ -= src.im_; + return *this; + } + + KOKKOS_INLINE_FUNCTION + complex& operator -= (const RealType& src) { + re_ -= src; + return *this; + } + + KOKKOS_INLINE_FUNCTION + complex& operator *= (const complex& src) { + const RealType realPart = re_ * src.re_ - im_ * src.im_; + const RealType imagPart = re_ * src.im_ + im_ * src.re_; + re_ = realPart; + im_ = imagPart; + return *this; + } + + KOKKOS_INLINE_FUNCTION + void operator *= (const volatile complex& src) volatile { + const RealType realPart = re_ * src.re_ - im_ * src.im_; + const RealType imagPart = re_ * src.im_ + im_ * src.re_; + re_ = realPart; + im_ = imagPart; + } + + KOKKOS_INLINE_FUNCTION + complex& operator *= (const RealType& src) { + re_ *= src; + im_ *= src; + return *this; + } + + KOKKOS_INLINE_FUNCTION + void operator *= (const volatile RealType& src) volatile { + re_ *= src; + im_ *= src; + } + + KOKKOS_INLINE_FUNCTION + complex& operator /= (const complex& y) { + // Scale (by the "1-norm" of y) to avoid unwarranted overflow. + // If the real part is +/-Inf and the imaginary part is -/+Inf, + // this won't change the result. + const RealType s = ::fabs (y.real ()) + ::fabs (y.imag ()); + + // If s is 0, then y is zero, so x/y == real(x)/0 + i*imag(x)/0. + // In that case, the relation x/y == (x/s) / (y/s) doesn't hold, + // because y/s is NaN. 
+ if (s == 0.0) { + this->re_ /= s; + this->im_ /= s; + } + else { + const complex x_scaled (this->re_ / s, this->im_ / s); + const complex y_conj_scaled (y.re_ / s, -(y.im_) / s); + const RealType y_scaled_abs = y_conj_scaled.re_ * y_conj_scaled.re_ + + y_conj_scaled.im_ * y_conj_scaled.im_; // abs(y) == abs(conj(y)) + *this = x_scaled * y_conj_scaled; + *this /= y_scaled_abs; + } + return *this; + } + + KOKKOS_INLINE_FUNCTION + complex& operator /= (const RealType& src) { + re_ /= src; + im_ /= src; + return *this; + } +}; + +//! Binary + operator for complex. +template +KOKKOS_INLINE_FUNCTION +complex +operator + (const complex& x, const complex& y) { + return complex (x.real () + y.real (), x.imag () + y.imag ()); +} + +//! Unary + operator for complex. +template +KOKKOS_INLINE_FUNCTION +complex +operator + (const complex& x) { + return x; +} + +//! Binary - operator for complex. +template +KOKKOS_INLINE_FUNCTION +complex +operator - (const complex& x, const complex& y) { + return complex (x.real () - y.real (), x.imag () - y.imag ()); +} + +//! Unary - operator for complex. +template +KOKKOS_INLINE_FUNCTION +complex +operator - (const complex& x) { + return complex (-x.real (), -x.imag ()); +} + +//! Binary * operator for complex. +template +KOKKOS_INLINE_FUNCTION +complex +operator * (const complex& x, const complex& y) { + return complex (x.real () * y.real () - x.imag () * y.imag (), + x.real () * y.imag () + x.imag () * y.real ()); +} + +/// \brief Binary * operator for std::complex and complex. +/// +/// This function exists because GCC 4.7.2 (and perhaps other +/// compilers) are not able to deduce that they can multiply +/// std::complex by Kokkos::complex, by first converting std::complex +/// to Kokkos::complex. +/// +/// This function cannot be called in a CUDA device function, because +/// std::complex's methods and nonmember functions are not marked as +/// CUDA device functions. 
+template +complex +operator * (const std::complex& x, const complex& y) { + return complex (x.real () * y.real () - x.imag () * y.imag (), + x.real () * y.imag () + x.imag () * y.real ()); +} + +/// \brief Binary * operator for RealType times complex. +/// +/// This function exists because the compiler doesn't know that +/// RealType and complex commute with respect to operator*. +template +KOKKOS_INLINE_FUNCTION +complex +operator * (const RealType& x, const complex& y) { + return complex (x * y.real (), x * y.imag ()); +} + + +//! Imaginary part of a complex number. +template +KOKKOS_INLINE_FUNCTION +RealType imag (const complex& x) { + return x.imag (); +} + +//! Real part of a complex number. +template +KOKKOS_INLINE_FUNCTION +RealType real (const complex& x) { + return x.real (); +} + +//! Absolute value (magnitude) of a complex number. +template +KOKKOS_INLINE_FUNCTION +RealType abs (const complex& x) { + // FIXME (mfh 31 Oct 2014) Scale to avoid unwarranted overflow. + return ::sqrt (real (x) * real (x) + imag (x) * imag (x)); +} + +//! Conjugate of a complex number. +template +KOKKOS_INLINE_FUNCTION +complex conj (const complex& x) { + return complex (real (x), -imag (x)); +} + + +//! Binary operator / for complex and real numbers +template +KOKKOS_INLINE_FUNCTION +complex +operator / (const complex& x, const RealType2& y) { + return complex (real (x) / y, imag (x) / y); +} + +//! Binary operator / for complex. +template +KOKKOS_INLINE_FUNCTION +complex +operator / (const complex& x, const complex& y) { + // Scale (by the "1-norm" of y) to avoid unwarranted overflow. + // If the real part is +/-Inf and the imaginary part is -/+Inf, + // this won't change the result. + const RealType s = ::fabs (real (y)) + ::fabs (imag (y)); + + // If s is 0, then y is zero, so x/y == real(x)/0 + i*imag(x)/0. + // In that case, the relation x/y == (x/s) / (y/s) doesn't hold, + // because y/s is NaN. 
+ if (s == 0.0) { + return complex (real (x) / s, imag (x) / s); + } + else { + const complex x_scaled (real (x) / s, imag (x) / s); + const complex y_conj_scaled (real (y) / s, -imag (y) / s); + const RealType y_scaled_abs = real (y_conj_scaled) * real (y_conj_scaled) + + imag (y_conj_scaled) * imag (y_conj_scaled); // abs(y) == abs(conj(y)) + complex result = x_scaled * y_conj_scaled; + result /= y_scaled_abs; + return result; + } +} + +//! Equality operator for two complex numbers. +template +KOKKOS_INLINE_FUNCTION +bool operator == (const complex& x, const complex& y) { + return real (x) == real (y) && imag (x) == imag (y); +} + +//! Equality operator for std::complex and Kokkos::complex. +template +KOKKOS_INLINE_FUNCTION +bool operator == (const std::complex& x, const complex& y) { + return std::real (x) == real (y) && std::imag (x) == imag (y); +} + +//! Equality operator for complex and real number. +template +KOKKOS_INLINE_FUNCTION +bool operator == (const complex& x, const RealType2& y) { + return real (x) == y && imag (x) == static_cast (0.0); +} + +//! Equality operator for real and complex number. +template +KOKKOS_INLINE_FUNCTION +bool operator == (const RealType& x, const complex& y) { + return y == x; +} + +//! Inequality operator for two complex numbers. +template +KOKKOS_INLINE_FUNCTION +bool operator != (const complex& x, const complex& y) { + return real (x) != real (y) || imag (x) != imag (y); +} + +//! Inequality operator for std::complex and Kokkos::complex. +template +KOKKOS_INLINE_FUNCTION +bool operator != (const std::complex& x, const complex& y) { + return std::real (x) != real (y) || std::imag (x) != imag (y); +} + +//! Inequality operator for complex and real number. +template +KOKKOS_INLINE_FUNCTION +bool operator != (const complex& x, const RealType2& y) { + return real (x) != y || imag (x) != static_cast (0.0); +} + +//! Inequality operator for real and complex number. 
+template +KOKKOS_INLINE_FUNCTION +bool operator != (const RealType& x, const complex& y) { + return y != x; +} + +template +std::ostream& operator << (std::ostream& os, const complex& x) { + const std::complex x_std (Kokkos::real (x), Kokkos::imag (x)); + os << x_std; + return os; +} + +template +std::ostream& operator >> (std::ostream& os, complex& x) { + std::complex x_std; + os >> x_std; + x = x_std; // only assigns on success of above + return os; +} + + +} // namespace Kokkos + +#endif // KOKKOS_COMPLEX_HPP diff --git a/lib/kokkos/core/src/Kokkos_Core.hpp b/lib/kokkos/core/src/Kokkos_Core.hpp index 2578313d77..ba4d2de15f 100644 --- a/lib/kokkos/core/src/Kokkos_Core.hpp +++ b/lib/kokkos/core/src/Kokkos_Core.hpp @@ -49,22 +49,22 @@ #include -#if defined( KOKKOS_HAVE_CUDA ) -#include +#if defined( KOKKOS_HAVE_SERIAL ) +#include #endif #if defined( KOKKOS_HAVE_OPENMP ) #include #endif -#if defined( KOKKOS_HAVE_SERIAL ) -#include -#endif - #if defined( KOKKOS_HAVE_PTHREAD ) #include #endif +#if defined( KOKKOS_HAVE_CUDA ) +#include +#endif + #include #include #include @@ -72,10 +72,8 @@ #include #include -#include - #ifdef KOKKOS_HAVE_CXX11 -////#include +#include #endif @@ -107,9 +105,70 @@ void finalize_all(); void fence(); +} // namespace Kokkos + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { +namespace Experimental { + +/* Allocate memory from a memory space. + * The allocation is tracked in Kokkos memory tracking system, so + * leaked memory can be identified. 
+ */ +template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > +inline +void * kokkos_malloc( const std::string & arg_alloc_label + , const size_t arg_alloc_size ) +{ + typedef typename Space::memory_space MemorySpace ; + return Impl::SharedAllocationRecord< MemorySpace >:: + allocate_tracked( MemorySpace() , arg_alloc_label , arg_alloc_size ); } -#ifdef KOKKOS_HAVE_CXX11 +template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > +inline +void * kokkos_malloc( const size_t arg_alloc_size ) +{ + typedef typename Space::memory_space MemorySpace ; + return Impl::SharedAllocationRecord< MemorySpace >:: + allocate_tracked( MemorySpace() , "no-label" , arg_alloc_size ); +} + +template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > +inline +void kokkos_free( void * arg_alloc ) +{ + typedef typename Space::memory_space MemorySpace ; + return Impl::SharedAllocationRecord< MemorySpace >:: + deallocate_tracked( arg_alloc ); +} + +template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > +inline +void * kokkos_realloc( void * arg_alloc , const size_t arg_alloc_size ) +{ + typedef typename Space::memory_space MemorySpace ; + return Impl::SharedAllocationRecord< MemorySpace >:: + reallocate_tracked( arg_alloc , arg_alloc_size ); +} + +} // namespace Experimental +} // namespace Kokkos + +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + +namespace Kokkos { + +using Kokkos::Experimental::kokkos_malloc ; +using Kokkos::Experimental::kokkos_realloc ; +using Kokkos::Experimental::kokkos_free ; + +} + +#else + namespace Kokkos { namespace Impl { @@ -161,7 +220,10 @@ void kokkos_free(const void* ptr) { template< class Arg = DefaultExecutionSpace> -const void* kokkos_realloc(const void* old_ptr, size_t size) { +void* kokkos_realloc(const void* old_ptr, size_t size) { + if(old_ptr == NULL) + return kokkos_malloc(size); + typedef typename Arg::memory_space MemorySpace; typedef typename 
MemorySpace::allocator allocator; Impl::AllocationTracker tracker = Impl::AllocationTracker::find(old_ptr); @@ -172,64 +234,11 @@ const void* kokkos_realloc(const void* old_ptr, size_t size) { } } // namespace Kokkos + #endif //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- -namespace Kokkos { -namespace Experimental { - -template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > -inline -void * kokkos_malloc( const size_t arg_alloc_size ) -{ - typedef typename Space::memory_space MemorySpace ; - typedef Kokkos::Experimental::Impl::SharedAllocationRecord< void , void > RecordBase ; - typedef Kokkos::Experimental::Impl::SharedAllocationRecord< MemorySpace , void > RecordHost ; - - RecordHost * const r = RecordHost::allocate( MemorySpace() , "kokkos_malloc" , arg_alloc_size ); - - RecordBase::increment( r ); - - return r->data(); -} - -template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > -inline -void kokkos_free( void * arg_alloc ) -{ - typedef typename Space::memory_space MemorySpace ; - typedef Kokkos::Experimental::Impl::SharedAllocationRecord< void , void > RecordBase ; - typedef Kokkos::Experimental::Impl::SharedAllocationRecord< MemorySpace , void > RecordHost ; - - RecordHost * const r = RecordHost::get_record( arg_alloc ); - - RecordBase::decrement( r ); -} - -template< class Space = typename Kokkos::DefaultExecutionSpace::memory_space > -inline -void * kokkos_realloc( void * arg_alloc , const size_t arg_alloc_size ) -{ - typedef typename Space::memory_space MemorySpace ; - typedef Kokkos::Experimental::Impl::SharedAllocationRecord< void , void > RecordBase ; - typedef Kokkos::Experimental::Impl::SharedAllocationRecord< MemorySpace , void > RecordHost ; - - RecordHost * const r_old = RecordHost::get_record( arg_alloc ); - RecordHost * const r_new = RecordHost::allocate( MemorySpace() , "kokkos_malloc" , 
arg_alloc_size ); - - Kokkos::Impl::DeepCopy( r_new->data() , r_old->data() - , std::min( r_old->size() , r_new->size() ) ); - - RecordBase::increment( r_new ); - RecordBase::decrement( r_old ); - - return r_new->data(); -} - -} // namespace Experimental -} // namespace Kokkos - #endif diff --git a/lib/kokkos/core/src/Kokkos_Core_fwd.hpp b/lib/kokkos/core/src/Kokkos_Core_fwd.hpp index 2cde9299a4..7e18884218 100644 --- a/lib/kokkos/core/src/Kokkos_Core_fwd.hpp +++ b/lib/kokkos/core/src/Kokkos_Core_fwd.hpp @@ -50,6 +50,22 @@ #include +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { + +struct AUTO_t { + KOKKOS_INLINE_FUNCTION + constexpr const AUTO_t & operator()() const { return *this ; } +}; + +namespace { +/**\brief Token to indicate that a parameter's value is to be automatically selected */ +constexpr AUTO_t AUTO = Kokkos::AUTO_t(); +} +} + //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- // Forward declarations for class inter-relationships @@ -58,6 +74,12 @@ namespace Kokkos { class HostSpace ; ///< Memory space for main process and CPU execution spaces +#ifdef KOKKOS_HAVE_HBWSPACE +namespace Experimental { +class HBWSpace ; /// Memory space for hbw_malloc from memkind (e.g. 
for KNL processor) +} +#endif + #if defined( KOKKOS_HAVE_SERIAL ) class Serial ; ///< Execution space main process on CPU #endif // defined( KOKKOS_HAVE_SERIAL ) @@ -162,9 +184,15 @@ struct VerifyExecutionCanAccessMemorySpace< Space , Space > Kokkos::Impl::VerifyExecutionCanAccessMemorySpace< \ Kokkos::Impl::ActiveExecutionMemorySpace , DATA_SPACE >::verify() +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + namespace Kokkos { void fence(); } +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + #endif /* #ifndef KOKKOS_CORE_FWD_HPP */ diff --git a/lib/kokkos/core/src/Kokkos_CudaSpace.hpp b/lib/kokkos/core/src/Kokkos_CudaSpace.hpp index 3caf250536..e6b337ecac 100644 --- a/lib/kokkos/core/src/Kokkos_CudaSpace.hpp +++ b/lib/kokkos/core/src/Kokkos_CudaSpace.hpp @@ -75,6 +75,10 @@ public: typedef unsigned int size_type ; + /*--------------------------------*/ + +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + typedef Impl::CudaMallocAllocator allocator; /** \brief Allocate a contiguous block of memory. @@ -96,6 +100,8 @@ public: ); #endif +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + /*--------------------------------*/ CudaSpace(); @@ -103,10 +109,10 @@ public: CudaSpace & operator = ( const CudaSpace & rhs ) = default ; ~CudaSpace() = default ; - /**\brief Allocate memory in the cuda space */ + /**\brief Allocate untracked memory in the cuda space */ void * allocate( const size_t arg_alloc_size ) const ; - /**\brief Deallocate memory in the cuda space */ + /**\brief Deallocate untracked memory in the cuda space */ void deallocate( void * const arg_alloc_ptr , const size_t arg_alloc_size ) const ; @@ -162,6 +168,10 @@ public: /** \brief If UVM capability is available */ static bool available(); + /*--------------------------------*/ + +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + typedef Impl::CudaUVMAllocator allocator; /** \brief Allocate a contiguous block of memory. @@ -182,6 +192,9 @@ public: , ::cudaChannelFormatDesc const & desc ); #endif + +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + /*--------------------------------*/ CudaUVMSpace(); @@ -189,10 +202,10 @@ public: CudaUVMSpace & operator = ( const CudaUVMSpace & rhs ) = default ; ~CudaUVMSpace() = default ; - /**\brief Allocate memory in the cuda space */ + /**\brief Allocate untracked memory in the cuda space */ void * allocate( const size_t arg_alloc_size ) const ; - /**\brief Deallocate memory in the cuda space */ + /**\brief Deallocate untracked memory in the cuda space */ void deallocate( void * const arg_alloc_ptr , const size_t arg_alloc_size ) const ; @@ -223,6 +236,9 @@ public: typedef Kokkos::Device device_type; typedef unsigned int size_type ; + /*--------------------------------*/ + +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) typedef Impl::CudaHostAllocator allocator ; @@ -234,6 +250,8 @@ public: */ static Impl::AllocationTracker allocate_and_track( const std::string & label, const size_t size ); +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + /*--------------------------------*/ CudaHostPinnedSpace(); @@ -241,10 +259,10 @@ public: CudaHostPinnedSpace & operator = ( const CudaHostPinnedSpace & rhs ) = default ; ~CudaHostPinnedSpace() = default ; - /**\brief Allocate memory in the cuda space */ + /**\brief Allocate untracked memory in the space */ void * allocate( const size_t arg_alloc_size ) const ; - /**\brief Deallocate memory in the cuda space */ + /**\brief Deallocate untracked memory in the space */ void deallocate( void * const arg_alloc_ptr , const size_t arg_alloc_size ) const ; @@ -631,8 +649,24 @@ public: static SharedAllocationRecord * allocate( const Kokkos::CudaSpace & arg_space , const std::string & arg_label - , const size_t arg_alloc_size - ); + , const size_t arg_alloc_size ); + + /**\brief Allocate tracked memory in the space */ + static + void * allocate_tracked( const Kokkos::CudaSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size ); + + /**\brief Reallocate tracked memory in the space */ + static + void * reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ); + + /**\brief Deallocate tracked memory in the space */ + static + void deallocate_tracked( void * const arg_alloc_ptr ); + + static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); template< typename AliasType > inline @@ -660,8 +694,6 @@ public: return ptr - reinterpret_cast( RecordBase::m_alloc_ptr ); } - static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); - static void print_records( std::ostream & , const Kokkos::CudaSpace & , bool detail = false ); }; @@ -704,6 +736,24 @@ public: , const size_t arg_alloc_size ); + /**\brief Allocate tracked memory in the space */ + static + void * allocate_tracked( const Kokkos::CudaUVMSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size ); + + /**\brief Reallocate tracked memory in the space */ + static + void * 
reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ); + + /**\brief Deallocate tracked memory in the space */ + static + void deallocate_tracked( void * const arg_alloc_ptr ); + + static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); + + template< typename AliasType > inline ::cudaTextureObject_t attach_texture_object() @@ -731,8 +781,6 @@ public: return ptr - reinterpret_cast( RecordBase::m_alloc_ptr ); } - static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); - static void print_records( std::ostream & , const Kokkos::CudaUVMSpace & , bool detail = false ); }; @@ -772,6 +820,21 @@ public: , const std::string & arg_label , const size_t arg_alloc_size ); + /**\brief Allocate tracked memory in the space */ + static + void * allocate_tracked( const Kokkos::CudaHostPinnedSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size ); + + /**\brief Reallocate tracked memory in the space */ + static + void * reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ); + + /**\brief Deallocate tracked memory in the space */ + static + void deallocate_tracked( void * const arg_alloc_ptr ); + static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); diff --git a/lib/kokkos/core/src/Kokkos_ExecPolicy.hpp b/lib/kokkos/core/src/Kokkos_ExecPolicy.hpp index 807cb5cb43..4f6f0f09c6 100644 --- a/lib/kokkos/core/src/Kokkos_ExecPolicy.hpp +++ b/lib/kokkos/core/src/Kokkos_ExecPolicy.hpp @@ -78,8 +78,9 @@ template< class Arg0 = void , class Arg1 = void , class Arg2 = void , class ExecSpace = // The first argument is the execution space, // otherwise use the default execution space. 
- typename Impl::if_c< Impl::is_execution_space< Arg0 >::value , Arg0 - , Kokkos::DefaultExecutionSpace >::type + typename std::conditional + < Impl::is_execution_space< Arg0 >::value , Arg0 + , Kokkos::DefaultExecutionSpace >::type > class RangePolicy { private: @@ -117,8 +118,8 @@ private: ) >::value }; // The work argument tag is the first or second argument - typedef typename Impl::if_c< Arg0_WorkTag , Arg0 , - typename Impl::if_c< Arg1_WorkTag , Arg1 , void + typedef typename std::conditional< Arg0_WorkTag , Arg0 , + typename std::conditional< Arg1_WorkTag , Arg1 , void >::type >::type WorkTag ; @@ -128,17 +129,18 @@ private: unsigned(DefaultIntValue) ))) }; // Only accept the integral type if the blocking is a power of two - typedef typename Impl::enable_if< Impl::is_power_of_two< Granularity >::value , - typename Impl::if_c< Arg0_IntType , Arg0 , - typename Impl::if_c< Arg1_IntType , Arg1 , - typename Impl::if_c< Arg2_IntType , Arg2 , - typename Impl::if_c< Arg0_IntConst , typename Impl::is_integral_constant::integral_type , - typename Impl::if_c< Arg1_IntConst , typename Impl::is_integral_constant::integral_type , - typename Impl::if_c< Arg2_IntConst , typename Impl::is_integral_constant::integral_type , - DefaultIntType - >::type >::type >::type - >::type >::type >::type - >::type + static_assert( Impl::is_integral_power_of_two( Granularity ) + , "RangePolicy blocking granularity must be power of two" ); + + typedef typename std::conditional< Arg0_IntType , Arg0 , + typename std::conditional< Arg1_IntType , Arg1 , + typename std::conditional< Arg2_IntType , Arg2 , + typename std::conditional< Arg0_IntConst , typename Impl::is_integral_constant::integral_type , + typename std::conditional< Arg1_IntConst , typename Impl::is_integral_constant::integral_type , + typename std::conditional< Arg2_IntConst , typename Impl::is_integral_constant::integral_type , + DefaultIntType + >::type >::type >::type + >::type >::type >::type IntType ; enum { GranularityMask = 
IntType(Granularity) - 1 }; @@ -187,8 +189,8 @@ public: * Typically used to partition a range over a group of threads. */ struct WorkRange { - typedef RangePolicy::work_tag work_tag ; - typedef RangePolicy::member_type member_type ; + typedef typename RangePolicy::work_tag work_tag ; + typedef typename RangePolicy::member_type member_type ; KOKKOS_INLINE_FUNCTION member_type begin() const { return m_begin ; } KOKKOS_INLINE_FUNCTION member_type end() const { return m_end ; } @@ -233,6 +235,38 @@ public: namespace Kokkos { +namespace Experimental { + +/** \brief Scratch memory request accepting per team and per thread value + * + * An instance of this class can be given as the last argument to a + * TeamPolicy constructor. It sets the amount of user requested shared + * memory for the team. + */ + +template< class MemorySpace > +class TeamScratchRequest { + size_t m_per_team; + size_t m_per_thread; + +public: + TeamScratchRequest(size_t per_team_, size_t per_thread_ = 0): + m_per_team(per_team_), m_per_thread(per_thread_) { + } + + size_t per_team() const { + return m_per_team; + } + size_t per_thread() const { + return m_per_thread; + } + size_t total(const size_t team_size) const { + return m_per_team + m_per_thread * team_size; + } +}; + +} + /** \brief Execution policy for parallel work over a league of teams of threads. * * The work functor is called for each thread of each team such that @@ -258,8 +292,9 @@ template< class Arg0 = void , class ExecSpace = // If the first argument is not an execution // then use the default execution space. 
- typename Impl::if_c< Impl::is_execution_space< Arg0 >::value , Arg0 - , Kokkos::DefaultExecutionSpace >::type + typename std::conditional + < Impl::is_execution_space< Arg0 >::value , Arg0 + , Kokkos::DefaultExecutionSpace >::type > class TeamPolicy { private: @@ -268,7 +303,7 @@ private: enum { Arg1_Void = Impl::is_same< Arg1 , void >::value }; enum { ArgOption_OK = Impl::StaticAssert< ( Arg0_ExecSpace || Arg1_Void ) >::value }; - typedef typename Impl::if_c< Arg0_ExecSpace , Arg1 , Arg0 >::type WorkTag ; + typedef typename std::conditional< Arg0_ExecSpace , Arg1 , Arg0 >::type WorkTag ; public: @@ -300,10 +335,20 @@ public: static int team_size_recommended( const FunctorType & , const int&); //---------------------------------------- /** \brief Construct policy with the given instance of the execution space */ - TeamPolicy( const execution_space & , int league_size_request , int team_size_request ); + TeamPolicy( const execution_space & , int league_size_request , int team_size_request , int vector_length_request = 1 ); + + TeamPolicy( const execution_space & , int league_size_request , const Kokkos::AUTO_t & , int vector_length_request = 1 ); /** \brief Construct policy with the default instance of the execution space */ - TeamPolicy( int league_size_request , int team_size_request ); + TeamPolicy( int league_size_request , int team_size_request , int vector_length_request = 1 ); + + TeamPolicy( int league_size_request , const Kokkos::AUTO_t & , int vector_length_request = 1 ); + + template + TeamPolicy( int league_size_request , int team_size_request , const Experimental::TeamScratchRequest& team_scratch_memory_request ); + + template + TeamPolicy( int league_size_request , const Kokkos::AUTO_t & , const Experimental::TeamScratchRequest& team_scratch_memory_request ); /** \brief The actual league size (number of teams) of the policy. 
* diff --git a/lib/kokkos/core/src/Kokkos_HBWSpace.hpp b/lib/kokkos/core/src/Kokkos_HBWSpace.hpp new file mode 100644 index 0000000000..94988e60b4 --- /dev/null +++ b/lib/kokkos/core/src/Kokkos_HBWSpace.hpp @@ -0,0 +1,327 @@ +/* +//@HEADER +// ************************************************************************ +// +// Kokkos v. 2.0 +// Copyright (2014) Sandia Corporation +// +// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +// the U.S. Government retains certain rights in this software. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the Corporation nor the names of the +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) +// +// ************************************************************************ +//@HEADER +*/ + +#ifndef KOKKOS_HBWSPACE_HPP +#define KOKKOS_HBWSPACE_HPP + + +#include +#include + +/*--------------------------------------------------------------------------*/ +#ifdef KOKKOS_HAVE_HBWSPACE + +namespace Kokkos { +namespace Experimental { +namespace Impl { + +/// \brief Initialize lock array for arbitrary size atomics. +/// +/// Arbitrary atomics are implemented using a hash table of locks +/// where the hash value is derived from the address of the +/// object for which an atomic operation is performed. +/// This function initializes the locks to zero (unset). +void init_lock_array_hbw_space(); + +/// \brief Aquire a lock for the address +/// +/// This function tries to aquire the lock for the hash value derived +/// from the provided ptr. If the lock is successfully aquired the +/// function returns true. Otherwise it returns false. +bool lock_address_hbw_space(void* ptr); + +/// \brief Release lock for the address +/// +/// This function releases the lock for the hash value derived +/// from the provided ptr. This function should only be called +/// after previously successfully aquiring a lock with +/// lock_address. 
+void unlock_address_hbw_space(void* ptr); + +} // namespace Impl +} // neamspace Experimental +} // namespace Kokkos + +namespace Kokkos { +namespace Experimental { + +/// \class HBWSpace +/// \brief Memory management for host memory. +/// +/// HBWSpace is a memory space that governs host memory. "Host" +/// memory means the usual CPU-accessible memory. +class HBWSpace { +public: + + //! Tag this class as a kokkos memory space + typedef HBWSpace memory_space ; + typedef size_t size_type ; + + /// \typedef execution_space + /// \brief Default execution space for this memory space. + /// + /// Every memory space has a default execution space. This is + /// useful for things like initializing a View (which happens in + /// parallel using the View's default execution space). +#if defined( KOKKOS_HAVE_DEFAULT_DEVICE_TYPE_OPENMP ) + typedef Kokkos::OpenMP execution_space ; +#elif defined( KOKKOS_HAVE_DEFAULT_DEVICE_TYPE_THREADS ) + typedef Kokkos::Threads execution_space ; +#elif defined( KOKKOS_HAVE_OPENMP ) + typedef Kokkos::OpenMP execution_space ; +#elif defined( KOKKOS_HAVE_PTHREAD ) + typedef Kokkos::Threads execution_space ; +#elif defined( KOKKOS_HAVE_SERIAL ) + typedef Kokkos::Serial execution_space ; +#else +# error "At least one of the following host execution spaces must be defined: Kokkos::OpenMP, Kokkos::Serial, or Kokkos::Threads. You might be seeing this message if you disabled the Kokkos::Serial device explicitly using the Kokkos_ENABLE_Serial:BOOL=OFF CMake option, but did not enable any of the other host execution space devices." +#endif + + //! This memory space preferred device_type + typedef Kokkos::Device device_type; + + /*--------------------------------*/ +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + + typedef Impl::HBWMallocAllocator allocator ; + + /** \brief Allocate a contiguous block of memory. + * + * The input label is associated with the block of memory. 
+ * The block of memory is tracked via reference counting where + * allocation gives it a reference count of one. + */ + static Kokkos::Impl::AllocationTracker allocate_and_track( const std::string & label, const size_t size ); + +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + + /*--------------------------------*/ + /* Functions unique to the HBWSpace */ + static int in_parallel(); + + static void register_in_parallel( int (*)() ); + + /*--------------------------------*/ + + /**\brief Default memory space instance */ + HBWSpace(); + HBWSpace( const HBWSpace & rhs ) = default ; + HBWSpace & operator = ( const HBWSpace & ) = default ; + ~HBWSpace() = default ; + + /**\brief Non-default memory space instance to choose allocation mechansim, if available */ + + enum AllocationMechanism { STD_MALLOC , POSIX_MEMALIGN , POSIX_MMAP , INTEL_MM_ALLOC }; + + explicit + HBWSpace( const AllocationMechanism & ); + + /**\brief Allocate untracked memory in the space */ + void * allocate( const size_t arg_alloc_size ) const ; + + /**\brief Deallocate untracked memory in the space */ + void deallocate( void * const arg_alloc_ptr + , const size_t arg_alloc_size ) const ; + +private: + + AllocationMechanism m_alloc_mech ; + + friend class Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void > ; +}; + +} // namespace Experimental +} // namespace Kokkos + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { +namespace Experimental { +namespace Impl { + +template<> +class SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void > + : public SharedAllocationRecord< void , void > +{ +private: + + friend Kokkos::Experimental::HBWSpace ; + + typedef SharedAllocationRecord< void , void > RecordBase ; + + SharedAllocationRecord( const SharedAllocationRecord & ) = delete ; + SharedAllocationRecord & operator = 
( const SharedAllocationRecord & ) = delete ; + + static void deallocate( RecordBase * ); + + /**\brief Root record for tracked allocations from this HBWSpace instance */ + static RecordBase s_root_record ; + + const Kokkos::Experimental::HBWSpace m_space ; + +protected: + + ~SharedAllocationRecord(); + SharedAllocationRecord() = default ; + + SharedAllocationRecord( const Kokkos::Experimental::HBWSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size + , const RecordBase::function_type arg_dealloc = & deallocate + ); + +public: + + inline + std::string get_label() const + { + return std::string( RecordBase::head()->m_label ); + } + + KOKKOS_INLINE_FUNCTION static + SharedAllocationRecord * allocate( const Kokkos::Experimental::HBWSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size + ) + { +#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) + return new SharedAllocationRecord( arg_space , arg_label , arg_alloc_size ); +#else + return (SharedAllocationRecord *) 0 ; +#endif + } + + /**\brief Allocate tracked memory in the space */ + static + void * allocate_tracked( const Kokkos::Experimental::HBWSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size ); + + /**\brief Reallocate tracked memory in the space */ + static + void * reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ); + + /**\brief Deallocate tracked memory in the space */ + static + void deallocate_tracked( void * const arg_alloc_ptr ); + + + static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); + + static void print_records( std::ostream & , const Kokkos::Experimental::HBWSpace & , bool detail = false ); +}; + +} // namespace Impl +} // namespace Experimental +} // namespace Kokkos + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { 
+namespace Impl { + + +template +struct DeepCopy { + DeepCopy( void * dst , const void * src , size_t n ) { + memcpy( dst , src , n ); + } + DeepCopy( const ExecutionSpace& exec, void * dst , const void * src , size_t n ) { + exec.fence(); + memcpy( dst , src , n ); + } +}; + +template +struct DeepCopy { + DeepCopy( void * dst , const void * src , size_t n ) { + memcpy( dst , src , n ); + } + DeepCopy( const ExecutionSpace& exec, void * dst , const void * src , size_t n ) { + exec.fence(); + memcpy( dst , src , n ); + } +}; + +template +struct DeepCopy { + DeepCopy( void * dst , const void * src , size_t n ) { + memcpy( dst , src , n ); + } + DeepCopy( const ExecutionSpace& exec, void * dst , const void * src , size_t n ) { + exec.fence(); + memcpy( dst , src , n ); + } +}; + +} // namespace Impl +} // namespace Kokkos + +namespace Kokkos { +namespace Impl { + +template<> +struct VerifyExecutionCanAccessMemorySpace< Kokkos::HostSpace , Kokkos::Experimental::HBWSpace > +{ + enum { value = true }; + inline static void verify( void ) { } + inline static void verify( const void * ) { } +}; + +template<> +struct VerifyExecutionCanAccessMemorySpace< Kokkos::Experimental::HBWSpace , Kokkos::HostSpace > +{ + enum { value = true }; + inline static void verify( void ) { } + inline static void verify( const void * ) { } +}; + +} // namespace Impl +} // namespace Kokkos + +#endif +#endif /* #define KOKKOS_HBWSPACE_HPP */ + diff --git a/lib/kokkos/core/src/Kokkos_HostSpace.hpp b/lib/kokkos/core/src/Kokkos_HostSpace.hpp index 2aa809e7ce..6e707f060c 100644 --- a/lib/kokkos/core/src/Kokkos_HostSpace.hpp +++ b/lib/kokkos/core/src/Kokkos_HostSpace.hpp @@ -128,6 +128,8 @@ public: //! This memory space preferred device_type typedef Kokkos::Device device_type; + /*--------------------------------*/ +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) #if defined( KOKKOS_USE_PAGE_ALIGNED_HOST_MEMORY ) typedef Impl::PageAlignedAllocator allocator ; @@ -143,6 +145,8 @@ public: */ static Impl::AllocationTracker allocate_and_track( const std::string & label, const size_t size ); +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + /*--------------------------------*/ /* Functions unique to the HostSpace */ static int in_parallel(); @@ -164,10 +168,10 @@ public: explicit HostSpace( const AllocationMechanism & ); - /**\brief Allocate memory in the host space */ + /**\brief Allocate untracked memory in the space */ void * allocate( const size_t arg_alloc_size ) const ; - /**\brief Deallocate memory in the host space */ + /**\brief Deallocate untracked memory in the space */ void deallocate( void * const arg_alloc_ptr , const size_t arg_alloc_size ) const ; @@ -239,6 +243,21 @@ public: #endif } + /**\brief Allocate tracked memory in the space */ + static + void * allocate_tracked( const Kokkos::HostSpace & arg_space + , const std::string & arg_label + , const size_t arg_alloc_size ); + + /**\brief Reallocate tracked memory in the space */ + static + void * reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ); + + /**\brief Deallocate tracked memory in the space */ + static + void deallocate_tracked( void * const arg_alloc_ptr ); + static SharedAllocationRecord * get_record( void * arg_alloc_ptr ); diff --git a/lib/kokkos/core/src/Kokkos_Layout.hpp b/lib/kokkos/core/src/Kokkos_Layout.hpp index 32822889df..e7d38a902b 100644 --- a/lib/kokkos/core/src/Kokkos_Layout.hpp +++ b/lib/kokkos/core/src/Kokkos_Layout.hpp @@ -157,10 +157,15 @@ struct LayoutStride { /// both tile dimensions are powers of two, Kokkos can optimize /// further. 
template < unsigned ArgN0 , unsigned ArgN1 , - bool IsPowerOfTwo = ( Impl::is_power_of_two::value && - Impl::is_power_of_two::value ) + bool IsPowerOfTwo = ( Impl::is_integral_power_of_two(ArgN0) && + Impl::is_integral_power_of_two(ArgN1) ) > struct LayoutTileLeft { + + static_assert( Impl::is_integral_power_of_two(ArgN0) && + Impl::is_integral_power_of_two(ArgN1) + , "LayoutTileLeft must be given power-of-two tile dimensions" ); + //! Tag this class as a kokkos array layout typedef LayoutTileLeft array_layout ; diff --git a/lib/kokkos/core/src/Kokkos_Macros.hpp b/lib/kokkos/core/src/Kokkos_Macros.hpp index c221c2f9f3..2386c9d2c3 100644 --- a/lib/kokkos/core/src/Kokkos_Macros.hpp +++ b/lib/kokkos/core/src/Kokkos_Macros.hpp @@ -416,5 +416,11 @@ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +#if ( defined( _POSIX_C_SOURCE ) && _POSIX_C_SOURCE >= 200112L ) || \ + ( defined( _XOPEN_SOURCE ) && _XOPEN_SOURCE >= 600 ) +#if defined(KOKKOS_ENABLE_PERFORMANCE_POSIX_MEMALIGN) +#define KOKKOS_POSIX_MEMALIGN_AVAILABLE 1 +#endif +#endif #endif /* #ifndef KOKKOS_MACROS_HPP */ diff --git a/lib/kokkos/core/src/Kokkos_MemoryTraits.hpp b/lib/kokkos/core/src/Kokkos_MemoryTraits.hpp index b581c7da23..5ee1f16fec 100644 --- a/lib/kokkos/core/src/Kokkos_MemoryTraits.hpp +++ b/lib/kokkos/core/src/Kokkos_MemoryTraits.hpp @@ -101,9 +101,9 @@ namespace Impl { */ enum { MEMORY_ALIGNMENT = #if defined( KOKKOS_MEMORY_ALIGNMENT ) - ( 1 << Kokkos::Impl::power_of_two< KOKKOS_MEMORY_ALIGNMENT >::value ) + ( 1 << Kokkos::Impl::integral_power_of_two( KOKKOS_MEMORY_ALIGNMENT ) ) #else - ( 1 << Kokkos::Impl::power_of_two< 128 >::value ) + ( 1 << Kokkos::Impl::integral_power_of_two( 128 ) ) #endif , MEMORY_ALIGNMENT_THRESHOLD = 4 }; diff --git a/lib/kokkos/core/src/Kokkos_OpenMP.hpp b/lib/kokkos/core/src/Kokkos_OpenMP.hpp index 508da04c87..e7dbf9a0e6 100644 --- 
a/lib/kokkos/core/src/Kokkos_OpenMP.hpp +++ b/lib/kokkos/core/src/Kokkos_OpenMP.hpp @@ -53,6 +53,9 @@ #include #include #include +#ifdef KOKKOS_HAVE_HBWSPACE +#include +#endif #include #include #include @@ -72,12 +75,16 @@ public: //! Tag this class as a kokkos execution space typedef OpenMP execution_space ; + #ifdef KOKKOS_HAVE_HBWSPACE + typedef Experimental::HBWSpace memory_space ; + #else typedef HostSpace memory_space ; + #endif //! This execution space preferred device_type typedef Kokkos::Device device_type; typedef LayoutRight array_layout ; - typedef HostSpace::size_type size_type ; + typedef memory_space::size_type size_type ; typedef ScratchMemorySpace< OpenMP > scratch_memory_space ; diff --git a/lib/kokkos/core/src/Kokkos_Parallel.hpp b/lib/kokkos/core/src/Kokkos_Parallel.hpp index 93bffcc781..696ff4042e 100644 --- a/lib/kokkos/core/src/Kokkos_Parallel.hpp +++ b/lib/kokkos/core/src/Kokkos_Parallel.hpp @@ -207,8 +207,12 @@ void parallel_for( const ExecPolicy & policy } #endif - (void) Impl::ParallelFor< FunctorType , ExecPolicy >( Impl::CopyWithoutTracking::apply(functor) , policy ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelFor< FunctorType , ExecPolicy > closure( functor , policy ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + closure.execute(); + #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { Kokkos::Experimental::endParallelFor(kpID); @@ -235,7 +239,11 @@ void parallel_for( const size_t work_count } #endif - (void) Impl::ParallelFor< FunctorType , policy >( Impl::CopyWithoutTracking::apply(functor) , policy(0,work_count) ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelFor< FunctorType , policy > closure( functor , policy(0,work_count) ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -333,7 
+341,11 @@ void parallel_reduce( const ExecPolicy & policy } #endif - (void) Impl::ParallelReduce< FunctorType , ExecPolicy >( Impl::CopyWithoutTracking::apply(functor) , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType , ExecPolicy > closure( functor , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -376,7 +388,11 @@ void parallel_reduce( const size_t work_count } #endif - (void) Impl::ParallelReduce< FunctorType , policy >( Impl::CopyWithoutTracking::apply(functor) , policy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType , policy > closure( functor , policy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -394,7 +410,7 @@ void parallel_reduce( const ExecPolicy & policy , const ViewType & result_view , const std::string& str = "" , typename Impl::enable_if< - ( Impl::is_view::value && ! Impl::is_integral< ExecPolicy >::value + ( Kokkos::is_view::value && ! Impl::is_integral< ExecPolicy >::value #ifdef KOKKOS_HAVE_CUDA && ! 
Impl::is_same::value #endif @@ -408,7 +424,11 @@ void parallel_reduce( const ExecPolicy & policy } #endif - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( Impl::CopyWithoutTracking::apply(functor) , policy , Impl::CopyWithoutTracking::apply(result_view) ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( functor , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -465,7 +485,11 @@ void parallel_reduce( const ExecPolicy & policy } #endif - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( Impl::CopyWithoutTracking::apply(functor) , policy , Impl::CopyWithoutTracking::apply(result_view) ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( functor , policy , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -482,7 +506,7 @@ void parallel_reduce( const size_t work_count , const FunctorType & functor , const ViewType & result_view , const std::string& str = "" - , typename Impl::enable_if<( Impl::is_view::value + , typename Impl::enable_if<( Kokkos::is_view::value #ifdef KOKKOS_HAVE_CUDA && ! 
Impl::is_same< typename Impl::FunctorPolicyExecutionSpace< FunctorType , void >::execution_space, @@ -503,7 +527,11 @@ void parallel_reduce( const size_t work_count } #endif - (void) Impl::ParallelReduce< FunctorType, ExecPolicy >( Impl::CopyWithoutTracking::apply(functor) , ExecPolicy(0,work_count) , Impl::CopyWithoutTracking::apply(result_view) ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType, ExecPolicy > closure( functor , ExecPolicy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -564,7 +592,11 @@ void parallel_reduce( const size_t work_count } #endif - (void) Impl::ParallelReduce< FunctorType , policy >( Impl::CopyWithoutTracking::apply(functor) , policy(0,work_count) , Impl::CopyWithoutTracking::apply(result_view) ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelReduce< FunctorType , policy > closure( functor , policy(0,work_count) , result_view ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -813,7 +845,11 @@ void parallel_scan( const ExecutionPolicy & policy } #endif - Impl::ParallelScan< FunctorType , ExecutionPolicy > scan( Impl::CopyWithoutTracking::apply(functor) , policy ); + Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelScan< FunctorType , ExecutionPolicy > closure( functor , policy ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { @@ -842,7 +878,11 @@ void parallel_scan( const size_t work_count } #endif - (void) Impl::ParallelScan< FunctorType , policy >( Impl::CopyWithoutTracking::apply(functor) , policy(0,work_count) ); + 
Kokkos::Impl::shared_allocation_tracking_claim_and_disable(); + Impl::ParallelScan< FunctorType , policy > closure( functor , policy(0,work_count) ); + Kokkos::Impl::shared_allocation_tracking_release_and_enable(); + + closure.execute(); #ifdef KOKKOSP_ENABLE_PROFILING if(Kokkos::Experimental::profileLibraryLoaded()) { diff --git a/lib/kokkos/core/src/Kokkos_Serial.hpp b/lib/kokkos/core/src/Kokkos_Serial.hpp index 5773a18b3f..8be973d442 100644 --- a/lib/kokkos/core/src/Kokkos_Serial.hpp +++ b/lib/kokkos/core/src/Kokkos_Serial.hpp @@ -151,7 +151,7 @@ public: static void finalize() {} //! Print configuration information to the given output stream. - static void print_configuration( std::ostream & , const bool detail = false ) {} + static void print_configuration( std::ostream & , const bool /* detail */ = false ) {} //-------------------------------------------------------------------------- @@ -295,6 +295,7 @@ class TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > private: const int m_league_size ; + const int m_scratch_size ; public: @@ -326,15 +327,55 @@ public: inline int team_size() const { return 1 ; } inline int league_size() const { return m_league_size ; } + inline size_t scratch_size() const { return m_scratch_size ; } /** \brief Specify league size, request team size */ - TeamPolicy( execution_space & , int league_size_request , int /* team_size_request */ , int vector_length_request = 1 ) + TeamPolicy( execution_space & + , int league_size_request + , int /* team_size_request */ + , int /* vector_length_request */ = 1 ) : m_league_size( league_size_request ) - { (void) vector_length_request; } + , m_scratch_size ( 0 ) + {} - TeamPolicy( int league_size_request , int /* team_size_request */ , int vector_length_request = 1 ) + TeamPolicy( execution_space & + , int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , int /* vector_length_request */ = 1 ) : m_league_size( league_size_request ) - { (void) vector_length_request; } + , 
m_scratch_size ( 0 ) + {} + + TeamPolicy( int league_size_request + , int /* team_size_request */ + , int /* vector_length_request */ = 1 ) + : m_league_size( league_size_request ) + , m_scratch_size ( 0 ) + {} + + TeamPolicy( int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , int /* vector_length_request */ = 1 ) + : m_league_size( league_size_request ) + , m_scratch_size ( 0 ) + {} + + template + TeamPolicy( int league_size_request + , int /* team_size_request */ + , const Experimental::TeamScratchRequest & scratch_request ) + : m_league_size(league_size_request) + , m_scratch_size(scratch_request.total(1)) + {} + + + template + TeamPolicy( int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , const Experimental::TeamScratchRequest & scratch_request ) + : m_league_size(league_size_request) + , m_scratch_size(scratch_request.total(1)) + {} typedef Impl::SerialTeamMember member_type ; }; @@ -346,53 +387,69 @@ public: /*--------------------------------------------------------------------------*/ /*--------------------------------------------------------------------------*/ +/* Parallel patterns for Kokkos::Serial with RangePolicy */ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelFor< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > > +class ParallelFor< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > Policy ; -public: - // work tag is void - template< class PType > - inline - ParallelFor( typename Impl::enable_if< - ( Impl::is_same< PType , Policy >::value && - Impl::is_same< typename PType::work_tag , void >::value - ), const FunctorType & >::type functor - , const PType & policy ) + const FunctorType m_functor ; + const Policy m_policy ; + + template< class TagType > + KOKKOS_INLINE_FUNCTION + typename 
std::enable_if< std::is_same< TagType , void >::value >::type + exec() const { - const typename PType::member_type e = policy.end(); - for ( typename PType::member_type i = policy.begin() ; i < e ; ++i ) { - functor( i ); + const typename Policy::member_type e = m_policy.end(); + for ( typename Policy::member_type i = m_policy.begin() ; i < e ; ++i ) { + m_functor( i ); } } - // work tag is non-void - template< class PType > - inline - ParallelFor( typename Impl::enable_if< - ( Impl::is_same< PType , Policy >::value && - ! Impl::is_same< typename PType::work_tag , void >::value - ), const FunctorType & >::type functor - , const PType & policy ) + template< class TagType > + KOKKOS_INLINE_FUNCTION + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec() const { - const typename PType::member_type e = policy.end(); - for ( typename PType::member_type i = policy.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i ); + const TagType t{} ; + const typename Policy::member_type e = m_policy.end(); + for ( typename Policy::member_type i = m_policy.begin() ; i < e ; ++i ) { + m_functor( t , i ); } } + +public: + + inline + void execute() const + { this-> template exec< typename Policy::work_tag >(); } + + inline + ParallelFor( const FunctorType & arg_functor + , const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + {} }; +/*--------------------------------------------------------------------------*/ + template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelReduce< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > > +class ParallelReduce< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > + > { -public: +private: + typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > Policy ; typedef typename Policy::work_tag WorkTag ; typedef Kokkos::Impl::FunctorValueTraits< FunctorType , WorkTag > ValueTraits ; @@ -401,123 
+458,136 @@ public: typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - // Work tag is void - template< class ViewType , class PType > - ParallelReduce( typename Impl::enable_if< - ( Impl::is_view< ViewType >::value && - Impl::is_same< typename ViewType::memory_space , HostSpace >::value && - Impl::is_same< PType , Policy >::value && - Impl::is_same< typename PType::work_tag , void >::value - ), const FunctorType & >::type functor - , const PType & policy - , const ViewType & result - ) + const FunctorType m_functor ; + const Policy m_policy ; + const pointer_type m_result_ptr ; + + + template< class TagType > + inline + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec( pointer_type ptr ) const { - pointer_type result_ptr = result.ptr_on_device(); + reference_type update = ValueInit::init( m_functor , ptr ); - if ( ! result_ptr ) { - result_ptr = (pointer_type) - Kokkos::Serial::scratch_memory_resize( ValueTraits::value_size( functor ) , 0 ); + const typename Policy::member_type e = m_policy.end(); + for ( typename Policy::member_type i = m_policy.begin() ; i < e ; ++i ) { + m_functor( i , update ); } - reference_type update = ValueInit::init( functor , result_ptr ); - - const typename PType::member_type e = policy.end(); - for ( typename PType::member_type i = policy.begin() ; i < e ; ++i ) { - functor( i , update ); - } - - Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( functor , result_ptr ); + Kokkos::Impl::FunctorFinal< FunctorType , TagType >:: + final( m_functor , ptr ); } - // Work tag is non-void - template< class ViewType , class PType > - ParallelReduce( typename Impl::enable_if< - ( Impl::is_view< ViewType >::value && - Impl::is_same< typename ViewType::memory_space , HostSpace >::value && - Impl::is_same< PType , Policy >::value && - ! 
Impl::is_same< typename PType::work_tag , void >::value - ), const FunctorType & >::type functor - , const PType & policy - , const ViewType & result - ) + template< class TagType > + inline + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec( pointer_type ptr ) const { - pointer_type result_ptr = result.ptr_on_device(); + const TagType t{} ; + reference_type update = ValueInit::init( m_functor , ptr ); - if ( ! result_ptr ) { - result_ptr = (pointer_type) - Kokkos::Serial::scratch_memory_resize( ValueTraits::value_size( functor ) , 0 ); + const typename Policy::member_type e = m_policy.end(); + for ( typename Policy::member_type i = m_policy.begin() ; i < e ; ++i ) { + m_functor( t , i , update ); } - typename ValueTraits::reference_type update = ValueInit::init( functor , result_ptr ); + Kokkos::Impl::FunctorFinal< FunctorType , TagType >:: + final( m_functor , ptr ); + } - const typename PType::member_type e = policy.end(); - for ( typename PType::member_type i = policy.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i , update ); - } +public: - Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( functor , result_ptr ); + inline + void execute() const + { + pointer_type ptr = (pointer_type) Kokkos::Serial::scratch_memory_resize + ( ValueTraits::value_size( m_functor ) , 0 ); + + this-> template exec< WorkTag >( m_result_ptr ? 
m_result_ptr : ptr ); + } + + template< class ViewType > + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const ViewType & arg_result ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result.ptr_on_device() ) + { + static_assert( Kokkos::is_view< ViewType >::value + , "Reduction result on Kokkos::Serial must be a Kokkos::View" ); + + static_assert( std::is_same< typename ViewType::memory_space + , Kokkos::HostSpace >::value + , "Reduction result on Kokkos::Serial must be a Kokkos::View in HostSpace" ); } }; +/*--------------------------------------------------------------------------*/ + template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelScan< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > > +class ParallelScan< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Serial > Policy ; - - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , typename Policy::work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , typename Policy::work_tag > ValueInit ; - -public: + typedef typename Policy::work_tag WorkTag ; + typedef Kokkos::Impl::FunctorValueTraits< FunctorType , WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType , WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - // work tag is void - template< class PType > + const FunctorType m_functor ; + const Policy m_policy ; + + template< class TagType > inline - ParallelScan( typename Impl::enable_if< - ( Impl::is_same< PType , Policy >::value && - Impl::is_same< typename PType::work_tag , void >::value - ), const FunctorType & >::type functor - , const PType & policy ) + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec( pointer_type 
ptr ) const { - pointer_type result_ptr = (pointer_type) - Kokkos::Serial::scratch_memory_resize( ValueTraits::value_size( functor ) , 0 ); + reference_type update = ValueInit::init( m_functor , ptr ); - reference_type update = ValueInit::init( functor , result_ptr ); - - const typename PType::member_type e = policy.end(); - for ( typename PType::member_type i = policy.begin() ; i < e ; ++i ) { - functor( i , update , true ); + const typename Policy::member_type e = m_policy.end(); + for ( typename Policy::member_type i = m_policy.begin() ; i < e ; ++i ) { + m_functor( i , update , true ); } - - Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( functor , result_ptr ); } - // work tag is non-void - template< class PType > + template< class TagType > inline - ParallelScan( typename Impl::enable_if< - ( Impl::is_same< PType , Policy >::value && - ! Impl::is_same< typename PType::work_tag , void >::value - ), const FunctorType & >::type functor - , const PType & policy ) + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec( pointer_type ptr ) const { - pointer_type result_ptr = (pointer_type) - Kokkos::Serial::scratch_memory_resize( ValueTraits::value_size( functor ) , 0 ); + const TagType t{} ; + reference_type update = ValueInit::init( m_functor , ptr ); - reference_type update = ValueInit::init( functor , result_ptr ); - - const typename PType::member_type e = policy.end(); - for ( typename PType::member_type i = policy.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i , update , true ); + const typename Policy::member_type e = m_policy.end(); + for ( typename Policy::member_type i = m_policy.begin() ; i < e ; ++i ) { + m_functor( t , i , update , true ); } - - Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( functor , result_ptr ); } + +public: + + inline + void execute() const + { + pointer_type ptr = (pointer_type) + Kokkos::Serial::scratch_memory_resize( ValueTraits::value_size( m_functor ) , 0 ); + this-> template exec< WorkTag >( ptr ); + } + + inline + ParallelScan( const FunctorType & arg_functor + , const Policy & arg_policy + ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + {} }; } // namespace Impl @@ -525,112 +595,157 @@ public: /*--------------------------------------------------------------------------*/ /*--------------------------------------------------------------------------*/ +/* Parallel patterns for Kokkos::Serial with TeamPolicy */ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 > -class ParallelFor< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > > +class ParallelFor< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > + > { private: typedef Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > Policy ; + typedef typename Policy::member_type Member ; + + const FunctorType m_functor ; + const int m_league ; + const int m_shared ; template< class TagType > - 
KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const FunctorType & >::type functor - , const typename Policy::member_type & member ) - { functor( member ); } - - template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const FunctorType & >::type functor - , const typename Policy::member_type & member ) - { functor( TagType() , member ); } - -public: - - ParallelFor( const FunctorType & functor - , const Policy & policy ) + inline + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec() const { - const int shared_size = FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ); - - Kokkos::Serial::scratch_memory_resize( 0 , shared_size ); - - for ( int ileague = 0 ; ileague < policy.league_size() ; ++ileague ) { - ParallelFor::template driver< typename Policy::work_tag > - ( functor , typename Policy::member_type(ileague,policy.league_size(),shared_size) ); - // functor( typename Policy::member_type(ileague,policy.league_size(),shared_size) ); + for ( int ileague = 0 ; ileague < m_league ; ++ileague ) { + m_functor( Member(ileague,m_league,m_shared) ); } } + + template< class TagType > + inline + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec() const + { + const TagType t{} ; + for ( int ileague = 0 ; ileague < m_league ; ++ileague ) { + m_functor( t , Member(ileague,m_league,m_shared) ); + } + } + +public: + + inline + void execute() const + { + Kokkos::Serial::scratch_memory_resize( 0 , m_shared ); + this-> template exec< typename Policy::work_tag >(); + } + + ParallelFor( const FunctorType & arg_functor + , const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_league( arg_policy.league_size() ) + , m_shared( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( arg_functor , 1 ) ) + { } }; +/*--------------------------------------------------------------------------*/ + template< class FunctorType , class Arg0 , class Arg1 > -class ParallelReduce< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > > +class ParallelReduce< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > + > { private: typedef Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Serial > Policy ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , typename Policy::work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , typename Policy::work_tag > ValueInit ; - -public: + typedef typename Policy::member_type Member ; + typedef typename Policy::work_tag WorkTag ; + typedef Kokkos::Impl::FunctorValueTraits< FunctorType , WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType , WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; -private: + const FunctorType m_functor ; + const int m_league ; + const int m_shared ; + pointer_type m_result_ptr ; template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const FunctorType & >::type functor - , const typename Policy::member_type & member - , reference_type update ) 
- { functor( member , update ); } + inline + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec( pointer_type ptr ) const + { + reference_type update = ValueInit::init( m_functor , ptr ); + + for ( int ileague = 0 ; ileague < m_league ; ++ileague ) { + m_functor( Member(ileague,m_league,m_shared) , update ); + } + + Kokkos::Impl::FunctorFinal< FunctorType , TagType >:: + final( m_functor , ptr ); + } template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const FunctorType & >::type functor - , const typename Policy::member_type & member - , reference_type update ) - { functor( TagType() , member , update ); } + inline + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec( pointer_type ptr ) const + { + const TagType t{} ; + + reference_type update = ValueInit::init( m_functor , ptr ); + + for ( int ileague = 0 ; ileague < m_league ; ++ileague ) { + m_functor( t , Member(ileague,m_league,m_shared) , update ); + } + + Kokkos::Impl::FunctorFinal< FunctorType , TagType >:: + final( m_functor , ptr ); + } public: - template< class ViewType > - ParallelReduce( const FunctorType & functor - , const Policy & policy - , const ViewType & result - ) + inline + void execute() const { - const int reduce_size = ValueTraits::value_size( functor ); - const int shared_size = FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ); - void * const scratch_reduce = Kokkos::Serial::scratch_memory_resize( reduce_size , shared_size ); + pointer_type ptr = (pointer_type) Kokkos::Serial::scratch_memory_resize + ( ValueTraits::value_size( m_functor ) , m_shared ); - const pointer_type result_ptr = - result.ptr_on_device() ? 
result.ptr_on_device() - : (pointer_type) scratch_reduce ; - - reference_type update = ValueInit::init( functor , result_ptr ); - - for ( int ileague = 0 ; ileague < policy.league_size() ; ++ileague ) { - ParallelReduce::template driver< typename Policy::work_tag > - ( functor , typename Policy::member_type(ileague,policy.league_size(),shared_size) , update ); - } - - Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( functor , result_ptr ); + this-> template exec< WorkTag >( m_result_ptr ? m_result_ptr : ptr ); } + + template< class ViewType > + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const ViewType & arg_result + ) + : m_functor( arg_functor ) + , m_league( arg_policy.league_size() ) + , m_shared( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( m_functor , 1 ) ) + , m_result_ptr( arg_result.ptr_on_device() ) + { + static_assert( Kokkos::is_view< ViewType >::value + , "Reduction result on Kokkos::Serial must be a Kokkos::View" ); + + static_assert( std::is_same< typename ViewType::memory_space + , Kokkos::HostSpace >::value + , "Reduction result on Kokkos::Serial must be a Kokkos::View in HostSpace" ); + } + }; } // namespace Impl } // namespace Kokkos -namespace Kokkos { +/*--------------------------------------------------------------------------*/ +/*--------------------------------------------------------------------------*/ +/* Nested parallel patterns for Kokkos::Serial with TeamPolicy */ +namespace Kokkos { namespace Impl { template @@ -739,8 +854,6 @@ void parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct()); } -#ifdef KOKKOS_HAVE_CXX11 - /** \brief Intra-thread vector parallel_reduce. Executes lambda(iType i, ValueType & val) for each i=0..N-1. 
* * The range i=0..N-1 is mapped to all vector lanes of the the calling thread and a reduction of @@ -764,8 +877,6 @@ void parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct(join)); } -#endif // KOKKOS_HAVE_CXX11 - } //namespace Kokkos namespace Kokkos { diff --git a/lib/kokkos/core/src/Kokkos_View.hpp b/lib/kokkos/core/src/Kokkos_View.hpp index 531218b5d0..2f93f35412 100644 --- a/lib/kokkos/core/src/Kokkos_View.hpp +++ b/lib/kokkos/core/src/Kokkos_View.hpp @@ -47,11 +47,12 @@ #include #include #include -#include -#include #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) +#include +#include + #include #include #include @@ -444,14 +445,14 @@ template< class DataType , typename ViewTraits::specialize > class View ; -namespace Impl { - template< class C > -struct is_view : public bool_< false > {}; +struct is_view : public Impl::bool_< false > {}; template< class D , class A1 , class A2 , class A3 , class S > -struct is_view< View< D , A1 , A2 , A3 , S > > : public bool_< true > {}; +struct is_view< View< D , A1 , A2 , A3 , S > > : public Impl::bool_< true > {}; +namespace Impl { +using Kokkos::is_view ; } //---------------------------------------------------------------------------- @@ -952,33 +953,37 @@ public: Impl::ViewError::scalar_operator_called_from_non_scalar_view > if_scalar_operator ; + typedef Impl::if_c< traits::rank == 0 , + reference_type , + Impl::ViewError::scalar_operator_called_from_non_scalar_view > + if_scalar_operator_return ; KOKKOS_INLINE_FUNCTION const View & operator = ( const typename if_scalar_operator::type & rhs ) const { KOKKOS_RESTRICT_EXECUTION_TO_DATA( typename traits::memory_space , ptr_on_device() ); - *m_ptr_on_device = if_scalar_operator::select( rhs ); + m_ptr_on_device[ 0 ] = if_scalar_operator::select( rhs ); return *this ; } KOKKOS_FORCEINLINE_FUNCTION - operator typename if_scalar_operator::type & () const + operator typename if_scalar_operator_return::type () const { KOKKOS_RESTRICT_EXECUTION_TO_DATA( typename 
traits::memory_space , ptr_on_device() ); - return if_scalar_operator::select( *m_ptr_on_device ); + return if_scalar_operator_return::select( m_ptr_on_device[ 0 ] ); } KOKKOS_FORCEINLINE_FUNCTION - typename if_scalar_operator::type & operator()() const + typename if_scalar_operator_return::type operator()() const { KOKKOS_RESTRICT_EXECUTION_TO_DATA( typename traits::memory_space , ptr_on_device() ); - return if_scalar_operator::select( *m_ptr_on_device ); + return if_scalar_operator_return::select( m_ptr_on_device[ 0 ] ); } KOKKOS_FORCEINLINE_FUNCTION - typename if_scalar_operator::type & operator*() const + typename if_scalar_operator_return::type operator*() const { KOKKOS_RESTRICT_EXECUTION_TO_DATA( typename traits::memory_space , ptr_on_device() ); - return if_scalar_operator::select( *m_ptr_on_device ); + return if_scalar_operator_return::select( m_ptr_on_device[ 0 ] ); } //------------------------------------ @@ -1849,6 +1854,8 @@ void resize( View & v , Impl::ViewRemap< view_type , view_type >( v_resized , v ); + view_type::execution_space::fence(); + v = v_resized ; } @@ -2092,27 +2099,10 @@ struct ALL { KOKKOS_INLINE_FUNCTION ALL(){} }; //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- -#include - -#else - -// Must define before includng -namespace Kokkos { -namespace Experimental { -namespace Impl { -struct ALL_t ; -} -} -using ALL = Experimental::Impl::ALL_t ; -} - -#include -#include +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ #include -#endif /* #if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ - //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- diff --git a/lib/kokkos/core/src/Kokkos_hwloc.hpp b/lib/kokkos/core/src/Kokkos_hwloc.hpp index a0b007f642..ff713c9523 100644 --- a/lib/kokkos/core/src/Kokkos_hwloc.hpp +++ b/lib/kokkos/core/src/Kokkos_hwloc.hpp @@ -1,13 +1,13 @@ /* //@HEADER // ************************************************************************ -// +// // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation -// +// // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. -// +// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -36,7 +36,7 @@ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) -// +// // ************************************************************************ //@HEADER */ @@ -109,7 +109,7 @@ unsigned thread_mapping( const char * const label , /** \brief Query core-coordinate of the current thread * with respect to the core_topology. * - * As long as the thread is running within the + * As long as the thread is running within the * process binding the following condition holds. * * core_coordinate.first < core_topology.first @@ -120,6 +120,10 @@ std::pair get_this_thread_coordinate(); /** \brief Bind the current thread to a core. */ bool bind_this_thread( const std::pair ); + +/** \brief Can hwloc bind threads? */ +bool can_bind_threads(); + /** \brief Bind the current thread to one of the cores in the list. * Set that entry to (~0,~0) and return the index. * If binding fails return ~0. 
diff --git a/lib/kokkos/core/src/Makefile b/lib/kokkos/core/src/Makefile index 8bb3508592..e7dc1ebeec 100644 --- a/lib/kokkos/core/src/Makefile +++ b/lib/kokkos/core/src/Makefile @@ -4,14 +4,14 @@ PREFIX ?= /usr/local/lib/kokkos default: messages build-lib echo "End Build" - + include $(KOKKOS_PATH)/Makefile.kokkos ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) - CXX = nvcc_wrapper + CXX = $(NVCC_WRAPPER) CXXFLAGS ?= -O3 - LINK = nvcc_wrapper + LINK = $(NVCC_WRAPPER) LINKFLAGS ?= else CXX ?= g++ @@ -62,8 +62,10 @@ build-makefile-kokkos: echo "KOKKOS_DEBUG = $(KOKKOS_DEBUG)" >> Makefile.kokkos echo "KOKKOS_USE_TPLS = $(KOKKOS_USE_TPLS)" >> Makefile.kokkos echo "KOKKOS_CXX_STANDARD = $(KOKKOS_CXX_STANDARD)" >> Makefile.kokkos + echo "KOKKOS_OPTIONS = $(KOKKOS_OPTIONS)" >> Makefile.kokkos echo "KOKKOS_CUDA_OPTIONS = $(KOKKOS_CUDA_OPTIONS)" >> Makefile.kokkos echo "CXX ?= $(CXX)" >> Makefile.kokkos + echo "NVCC_WRAPPER ?= $(PREFIX)/bin/nvcc_wrapper" >> Makefile.kokkos echo "" >> Makefile.kokkos echo "#Source and Header files of Kokkos relative to KOKKOS_PATH" >> Makefile.kokkos echo "KOKKOS_HEADERS = $(KOKKOS_HEADERS)" >> Makefile.kokkos @@ -90,6 +92,7 @@ build-lib: build-makefile-kokkos $(KOKKOS_LINK_DEPENDS) mkdir: mkdir -p $(PREFIX) + mkdir -p $(PREFIX)/bin mkdir -p $(PREFIX)/include mkdir -p $(PREFIX)/lib mkdir -p $(PREFIX)/include/impl @@ -97,7 +100,7 @@ mkdir: copy-cuda: mkdir mkdir -p $(PREFIX)/include/Cuda cp $(KOKKOS_HEADERS_CUDA) $(PREFIX)/include/Cuda - + copy-threads: mkdir mkdir -p $(PREFIX)/include/Threads cp $(KOKKOS_HEADERS_THREADS) $(PREFIX)/include/Threads @@ -111,13 +114,14 @@ copy-openmp: mkdir cp $(KOKKOS_HEADERS_OPENMP) $(PREFIX)/include/OpenMP install: mkdir $(CONDITIONAL_COPIES) build-lib + cp $(NVCC_WRAPPER) $(PREFIX)/bin cp $(KOKKOS_HEADERS_INCLUDE) $(PREFIX)/include cp $(KOKKOS_HEADERS_INCLUDE_IMPL) $(PREFIX)/include/impl cp Makefile.kokkos $(PREFIX) cp libkokkos.a $(PREFIX)/lib cp KokkosCore_config.h $(PREFIX)/include - + clean: kokkos-clean rm 
Makefile.kokkos diff --git a/lib/kokkos/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp b/lib/kokkos/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp index f8393611e4..f1a8397e95 100644 --- a/lib/kokkos/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp +++ b/lib/kokkos/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp @@ -57,41 +57,57 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelFor< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > > +class ParallelFor< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > Policy ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::member_type Member ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , const PType & range ) + const FunctorType m_functor ; + const Policy m_policy ; + + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend ) { - const typename PType::member_type work_end = range.end(); - for ( typename PType::member_type iwork = range.begin() ; iwork < work_end ; ++iwork ) { + #ifdef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION + #ifdef KOKKOS_HAVE_PRAGMA_IVDEP + #pragma ivdep + #endif + #endif + for ( Member iwork = ibeg ; iwork < iend ; ++iwork ) { functor( iwork ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend ) { - const typename PType::member_type work_end = range.end(); - for ( typename PType::member_type iwork = range.begin() ; iwork < work_end ; ++iwork ) { - functor( typename PType::work_tag() , iwork ); + const TagType t{} ; + #ifdef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION + #ifdef KOKKOS_HAVE_PRAGMA_IVDEP + #pragma ivdep + #endif + #endif + for ( Member iwork = ibeg ; iwork < iend ; ++iwork ) { + functor( t , iwork ); } } public: inline - ParallelFor( const FunctorType & functor - , const Policy & policy ) + void execute() const { OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_for"); OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_for"); @@ -99,10 +115,20 @@ public: #pragma omp parallel { OpenMPexec & exec = * OpenMPexec::get_thread_omp(); - driver( functor , typename Policy::WorkRange( policy , exec.pool_rank() , exec.pool_size() ) ); + + const WorkRange range( m_policy, exec.pool_rank(), exec.pool_size() ); + + ParallelFor::template exec_range< WorkTag >( m_functor , range.begin() , range.end() ); } /* END #pragma omp parallel */ } + + inline + ParallelFor( const FunctorType & arg_functor + , const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + {} }; } // namespace Impl @@ -115,90 +141,119 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelReduce< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > > +class ParallelReduce< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > Policy ; - typedef typename Policy::work_tag WorkTag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , WorkTag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , WorkTag > ValueInit ; - 
typedef Kokkos::Impl::FunctorValueJoin< FunctorType , WorkTag > ValueJoin ; + + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::member_type Member ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; + typedef Kokkos::Impl::FunctorValueJoin< FunctorType, WorkTag > ValueJoin ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , reference_type update - , const PType & range ) + const FunctorType m_functor ; + const Policy m_policy ; + const pointer_type m_result_ptr ; + + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update ) { - const typename PType::member_type work_end = range.end(); - for ( typename PType::member_type iwork = range.begin() ; iwork < work_end ; ++iwork ) { + #ifdef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION + #ifdef KOKKOS_HAVE_PRAGMA_IVDEP + #pragma ivdep + #endif + #endif + for ( Member iwork = ibeg ; iwork < iend ; ++iwork ) { functor( iwork , update ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , reference_type update - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update ) { - const typename PType::member_type work_end = range.end(); - for ( typename PType::member_type iwork = range.begin() ; iwork < work_end ; ++iwork ) { - functor( typename PType::work_tag() , iwork , update ); + const TagType t{} ; + #ifdef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION + #ifdef KOKKOS_HAVE_PRAGMA_IVDEP + #pragma ivdep + #endif + #endif + for ( Member iwork = ibeg ; iwork < iend ; ++iwork ) { + functor( t , iwork , update ); } } public: + inline + void execute() const + { + OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_reduce"); + OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_reduce"); + + OpenMPexec::resize_scratch( ValueTraits::value_size( m_functor ) , 0 ); + +#pragma omp parallel + { + OpenMPexec & exec = * OpenMPexec::get_thread_omp(); + const WorkRange range( m_policy, exec.pool_rank(), exec.pool_size() ); + ParallelReduce::template exec_range< WorkTag > + ( m_functor , range.begin() , range.end() + , ValueInit::init( m_functor , exec.scratch_reduce() ) ); + } +/* END #pragma omp parallel */ + + // Reduction: + + const pointer_type ptr = pointer_type( OpenMPexec::pool_rev(0)->scratch_reduce() ); + + for ( int i = 1 ; i < OpenMPexec::pool_size() ; ++i ) { + ValueJoin::join( m_functor , ptr , OpenMPexec::pool_rev(i)->scratch_reduce() ); + } + + Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( m_functor , ptr ); + + if ( m_result_ptr ) { + const int n = ValueTraits::value_count( m_functor ); + + for ( int j = 0 ; j < n ; ++j ) { m_result_ptr[j] = ptr[j] ; } + } + } + //---------------------------------------- template< class ViewType > inline - ParallelReduce( typename Impl::enable_if< - ( Impl::is_view< ViewType >::value && - Impl::is_same< typename ViewType::memory_space , HostSpace >::value - ), const FunctorType & >::type functor - , const Policy & policy - , 
const ViewType & result_view ) - { - OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_reduce"); - OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_reduce"); - - OpenMPexec::resize_scratch( ValueTraits::value_size( functor ) , 0 ); - -#pragma omp parallel + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const ViewType & arg_result_view ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result_view.ptr_on_device() ) { - OpenMPexec & exec = * OpenMPexec::get_thread_omp(); + static_assert( Kokkos::is_view< ViewType >::value + , "Reduction result on Kokkos::OpenMP must be a Kokkos::View" ); - driver( functor - , ValueInit::init( functor , exec.scratch_reduce() ) - , typename Policy::WorkRange( policy , exec.pool_rank() , exec.pool_size() ) - ); + static_assert( std::is_same< typename ViewType::memory_space + , Kokkos::HostSpace >::value + , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace" ); } -/* END #pragma omp parallel */ - - { - const pointer_type ptr = pointer_type( OpenMPexec::pool_rev(0)->scratch_reduce() ); - - for ( int i = 1 ; i < OpenMPexec::pool_size() ; ++i ) { - ValueJoin::join( functor , ptr , OpenMPexec::pool_rev(i)->scratch_reduce() ); - } - - Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( functor , ptr ); - - if ( result_view.ptr_on_device() ) { - const int n = ValueTraits::value_count( functor ); - - for ( int j = 0 ; j < n ; ++j ) { result_view.ptr_on_device()[j] = ptr[j] ; } - } - } - } }; } // namespace Impl @@ -211,106 +266,129 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelScan< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > > +class ParallelScan< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::OpenMP > Policy ; - typedef typename 
Policy::work_tag WorkTag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , WorkTag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , WorkTag > ValueInit ; - typedef Kokkos::Impl::FunctorValueJoin< FunctorType , WorkTag > ValueJoin ; - typedef Kokkos::Impl::FunctorValueOps< FunctorType , WorkTag > ValueOps ; + + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::member_type Member ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; + typedef Kokkos::Impl::FunctorValueJoin< FunctorType, WorkTag > ValueJoin ; + typedef Kokkos::Impl::FunctorValueOps< FunctorType, WorkTag > ValueOps ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , reference_type update - , const PType & range - , const bool final ) + const FunctorType m_functor ; + const Policy m_policy ; + + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update , const bool final ) { - const typename PType::member_type work_end = range.end(); - for ( typename PType::member_type iwork = range.begin() ; iwork < work_end ; ++iwork ) { + #ifdef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION + #ifdef KOKKOS_HAVE_PRAGMA_IVDEP + #pragma ivdep + #endif + #endif + for ( Member iwork = ibeg ; iwork < iend ; ++iwork ) { functor( iwork , update , final ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! 
Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , reference_type update - , const PType & range - , const bool final ) + template< class TagType > + inline static + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update , const bool final ) { - const typename PType::member_type work_end = range.end(); - for ( typename PType::member_type iwork = range.begin() ; iwork < work_end ; ++iwork ) { - functor( typename PType::work_tag() , iwork , update , final ); + const TagType t{} ; + #ifdef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION + #ifdef KOKKOS_HAVE_PRAGMA_IVDEP + #pragma ivdep + #endif + #endif + for ( Member iwork = ibeg ; iwork < iend ; ++iwork ) { + functor( t , iwork , update , final ); } } public: + inline + void execute() const + { + OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_scan"); + OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_scan"); + + OpenMPexec::resize_scratch( 2 * ValueTraits::value_size( m_functor ) , 0 ); + +#pragma omp parallel + { + OpenMPexec & exec = * OpenMPexec::get_thread_omp(); + const WorkRange range( m_policy, exec.pool_rank(), exec.pool_size() ); + const pointer_type ptr = + pointer_type( exec.scratch_reduce() ) + + ValueTraits::value_count( m_functor ); + ParallelScan::template exec_range< WorkTag > + ( m_functor , range.begin() , range.end() + , ValueInit::init( m_functor , ptr ) , false ); + } +/* END #pragma omp parallel */ + + { + const unsigned thread_count = OpenMPexec::pool_size(); + const unsigned value_count = ValueTraits::value_count( m_functor ); + + pointer_type ptr_prev = 0 ; + + for ( unsigned rank_rev = thread_count ; rank_rev-- ; ) { + + pointer_type ptr = pointer_type( OpenMPexec::pool_rev(rank_rev)->scratch_reduce() ); + + if ( ptr_prev ) { + for ( unsigned i = 0 ; i < value_count ; ++i ) { ptr[i] = ptr_prev[ i + 
value_count ] ; } + ValueJoin::join( m_functor , ptr + value_count , ptr ); + } + else { + ValueInit::init( m_functor , ptr ); + } + + ptr_prev = ptr ; + } + } + +#pragma omp parallel + { + OpenMPexec & exec = * OpenMPexec::get_thread_omp(); + const WorkRange range( m_policy, exec.pool_rank(), exec.pool_size() ); + const pointer_type ptr = pointer_type( exec.scratch_reduce() ); + ParallelScan::template exec_range< WorkTag > + ( m_functor , range.begin() , range.end() + , ValueOps::reference( ptr ) , true ); + } +/* END #pragma omp parallel */ + } + //---------------------------------------- inline - ParallelScan( const FunctorType & functor - , const Policy & policy ) - { - OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_scan"); - OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_scan"); - - OpenMPexec::resize_scratch( 2 * ValueTraits::value_size( functor ) , 0 ); - -#pragma omp parallel - { - OpenMPexec & exec = * OpenMPexec::get_thread_omp(); - - driver( functor - , ValueInit::init( functor , pointer_type( exec.scratch_reduce() ) + ValueTraits::value_count( functor ) ) - , typename Policy::WorkRange( policy , exec.pool_rank() , exec.pool_size() ) - , false ); - } -/* END #pragma omp parallel */ - - { - const unsigned thread_count = OpenMPexec::pool_size(); - const unsigned value_count = ValueTraits::value_count( functor ); - - pointer_type ptr_prev = 0 ; - - for ( unsigned rank_rev = thread_count ; rank_rev-- ; ) { - - pointer_type ptr = pointer_type( OpenMPexec::pool_rev(rank_rev)->scratch_reduce() ); - - if ( ptr_prev ) { - for ( unsigned i = 0 ; i < value_count ; ++i ) { ptr[i] = ptr_prev[ i + value_count ] ; } - ValueJoin::join( functor , ptr + value_count , ptr ); - } - else { - ValueInit::init( functor , ptr ); - } - - ptr_prev = ptr ; - } - } - -#pragma omp parallel - { - OpenMPexec & exec = * OpenMPexec::get_thread_omp(); - - driver( functor - , ValueOps::reference( pointer_type( exec.scratch_reduce() ) ) - , typename Policy::WorkRange( 
policy , exec.pool_rank() , exec.pool_size() ) - , true ); - } -/* END #pragma omp parallel */ - - } + ParallelScan( const FunctorType & arg_functor + , const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + {} //---------------------------------------- }; @@ -325,62 +403,84 @@ namespace Kokkos { namespace Impl { template< class FunctorType , class Arg0 , class Arg1 > -class ParallelFor< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::OpenMP > > +class ParallelFor< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::OpenMP > + > { private: typedef Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::OpenMP > Policy ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + + const FunctorType m_functor ; + const Policy m_policy ; + const int m_shmem_size ; template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const FunctorType & >::type functor - , const typename Policy::member_type & member ) - { functor( member ); } + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member ) + { + for ( ; member.valid() ; member.next() ) { + functor( member ); + } + } template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const FunctorType & >::type functor - , const typename Policy::member_type & member ) - { functor( TagType() , member ); } + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member ) + { + const TagType t{} ; + for ( ; member.valid() ; member.next() ) { + functor( t , member ); + } + } public: inline - ParallelFor( const FunctorType & functor , - const Policy & policy ) - { - OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_for"); - OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_for"); + void execute() const + { + OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_for"); + OpenMPexec::verify_initialized("Kokkos::OpenMP parallel_for"); - const size_t team_reduce_size = Policy::member_type::team_reduce_size(); - const size_t team_shmem_size = FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ); + const size_t team_reduce_size = Policy::member_type::team_reduce_size(); - OpenMPexec::resize_scratch( 0 , team_reduce_size + team_shmem_size ); + OpenMPexec::resize_scratch( 0 , team_reduce_size + m_shmem_size ); #pragma omp parallel - { - typename Policy::member_type member( * OpenMPexec::get_thread_omp() , policy , team_shmem_size ); - - for ( ; member.valid() ; member.next() ) { - ParallelFor::template driver< typename Policy::work_tag >( functor , member ); + { + ParallelFor::template exec_team< WorkTag > + ( m_functor + , Member( * OpenMPexec::get_thread_omp(), m_policy, m_shmem_size) ); } - } /* END #pragma omp parallel */ - } + } - void wait() {} + inline + ParallelFor( const FunctorType & arg_functor , + const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_shmem_size( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( arg_functor , arg_policy.team_size() ) ) + {} }; template< class FunctorType , class Arg0 , class Arg1 > -class ParallelReduce< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::OpenMP > > +class ParallelReduce< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::OpenMP > + > { private: typedef Kokkos::TeamPolicy< Arg0 
, Arg1 , Kokkos::OpenMP > Policy ; - typedef typename Policy::work_tag WorkTag ; + + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + typedef Kokkos::Impl::FunctorValueTraits< FunctorType , WorkTag > ValueTraits ; typedef Kokkos::Impl::FunctorValueInit< FunctorType , WorkTag > ValueInit ; typedef Kokkos::Impl::FunctorValueJoin< FunctorType , WorkTag > ValueJoin ; @@ -388,102 +488,85 @@ private: typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; + const FunctorType m_functor ; + const Policy m_policy ; + const pointer_type m_result_ptr ; + const int m_shmem_size ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , const typename PType::member_type & member - , reference_type update ) - { functor( member , update ); } + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member , reference_type update ) + { + for ( ; member.valid() ; member.next() ) { + functor( member , update ); + } + } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< ! Impl::is_same< typename PType::work_tag , void >::value , - const FunctorType & >::type functor - , const typename PType::member_type & member - , reference_type update ) - { functor( typename PType::work_tag() , member , update ); } + template< class TagType > + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member , reference_type update ) + { + const TagType t{} ; + for ( ; member.valid() ; member.next() ) { + functor( t , member , update ); + } + } public: inline - ParallelReduce( const FunctorType & functor , - const Policy & policy ) - { - OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_reduce"); + void execute() const + { + OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_reduce"); - const size_t team_reduce_size = Policy::member_type::team_reduce_size(); - const size_t team_shmem_size = FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ); + const size_t team_reduce_size = Policy::member_type::team_reduce_size(); - OpenMPexec::resize_scratch( ValueTraits::value_size( functor ) , team_reduce_size + team_shmem_size ); + OpenMPexec::resize_scratch( ValueTraits::value_size( m_functor ) , team_reduce_size + m_shmem_size ); #pragma omp parallel - { - OpenMPexec & exec = * OpenMPexec::get_thread_omp(); + { + OpenMPexec & exec = * OpenMPexec::get_thread_omp(); - reference_type update = ValueInit::init( functor , exec.scratch_reduce() ); - - for ( typename Policy::member_type member( exec , policy , team_shmem_size ); member.valid() ; member.next() ) { - ParallelReduce::template driver< Policy >( functor , member , update ); + ParallelReduce::template exec_team< WorkTag > + ( m_functor + , Member( exec , m_policy , m_shmem_size ) + , ValueInit::init( m_functor , exec.scratch_reduce() ) ); } - } /* END #pragma omp parallel */ - { - typedef Kokkos::Impl::FunctorValueJoin< FunctorType , WorkTag , reference_type > Join ; + { + const pointer_type ptr = pointer_type( OpenMPexec::pool_rev(0)->scratch_reduce() ); - const pointer_type ptr = pointer_type( OpenMPexec::pool_rev(0)->scratch_reduce() ); + int max_active_threads = OpenMPexec::pool_size(); + if( max_active_threads > m_policy.league_size()* m_policy.team_size() ) + max_active_threads = 
m_policy.league_size()* m_policy.team_size(); - for ( int i = 1 ; i < OpenMPexec::pool_size() ; ++i ) { - Join::join( functor , ptr , OpenMPexec::pool_rev(i)->scratch_reduce() ); + for ( int i = 1 ; i < max_active_threads ; ++i ) { + ValueJoin::join( m_functor , ptr , OpenMPexec::pool_rev(i)->scratch_reduce() ); + } + + Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( m_functor , ptr ); + + if ( m_result_ptr ) { + const int n = ValueTraits::value_count( m_functor ); + + for ( int j = 0 ; j < n ; ++j ) { m_result_ptr[j] = ptr[j] ; } + } } - - Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( functor , ptr ); } - } template< class ViewType > inline - ParallelReduce( const FunctorType & functor , - const Policy & policy , - const ViewType & result ) - { - OpenMPexec::verify_is_process("Kokkos::OpenMP parallel_reduce"); - - const size_t team_reduce_size = Policy::member_type::team_reduce_size(); - const size_t team_shmem_size = FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ); - - OpenMPexec::resize_scratch( ValueTraits::value_size( functor ) , team_reduce_size + team_shmem_size ); - -#pragma omp parallel - { - OpenMPexec & exec = * OpenMPexec::get_thread_omp(); - - reference_type update = ValueInit::init( functor , exec.scratch_reduce() ); - - for ( typename Policy::member_type member( exec , policy , team_shmem_size ); member.valid() ; member.next() ) { - ParallelReduce::template driver< Policy >( functor , member , update ); - } - } -/* END #pragma omp parallel */ - - { - const pointer_type ptr = pointer_type( OpenMPexec::pool_rev(0)->scratch_reduce() ); - - for ( int i = 1 ; i < OpenMPexec::pool_size() ; ++i ) { - ValueJoin::join( functor , ptr , OpenMPexec::pool_rev(i)->scratch_reduce() ); - } - - Kokkos::Impl::FunctorFinal< FunctorType , WorkTag >::final( functor , ptr ); - - const int n = ValueTraits::value_count( functor ); - - for ( int j = 0 ; j < n ; ++j ) { result.ptr_on_device()[j] = ptr[j] ; } - } - } - - 
void wait() {} + ParallelReduce( const FunctorType & arg_functor , + const Policy & arg_policy , + const ViewType & arg_result ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result.ptr_on_device() ) + , m_shmem_size( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( arg_functor , arg_policy.team_size() ) ) + {} }; } // namespace Impl diff --git a/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.cpp b/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.cpp index ed98fd2f97..3e0fc42a68 100644 --- a/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.cpp +++ b/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.cpp @@ -1,13 +1,13 @@ /* //@HEADER // ************************************************************************ -// +// // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation -// +// // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. -// +// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -36,7 +36,7 @@ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) -// +// // ************************************************************************ //@HEADER */ @@ -84,8 +84,16 @@ int OpenMPexec::m_map_rank[ OpenMPexec::MAX_THREAD_COUNT ] = { 0 }; int OpenMPexec::m_pool_topo[ 4 ] = { 0 }; +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + OpenMPexec::Pool OpenMPexec::m_pool; +#else + +OpenMPexec * OpenMPexec::m_pool[ OpenMPexec::MAX_THREAD_COUNT ] = { 0 }; + +#endif + void OpenMPexec::verify_is_process( const char * const label ) { if ( omp_in_parallel() ) { @@ -102,6 +110,13 @@ void OpenMPexec::verify_initialized( const char * const label ) msg.append( " ERROR: not initialized" ); Kokkos::Impl::throw_runtime_exception( msg ); } + + if ( omp_get_max_threads() != Kokkos::OpenMP::thread_pool_size(0) ) { + std::string msg( label ); + msg.append( " ERROR: Initialized but threads modified inappropriately" ); + Kokkos::Impl::throw_runtime_exception( msg ); + } + } void OpenMPexec::clear_scratch() @@ -109,7 +124,16 @@ void OpenMPexec::clear_scratch() #pragma omp parallel { const int rank_rev = m_map_rank[ omp_get_thread_num() ]; +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::HostSpace , void > Record ; + if ( m_pool[ rank_rev ] ) { + Record * const r = Record::get_record( m_pool[ rank_rev ] ); + m_pool[ rank_rev ] = 0 ; + Record::decrement( r ); + } +#else m_pool.at(rank_rev).clear(); +#endif } /* END #pragma omp parallel */ } @@ -147,7 +171,27 @@ void OpenMPexec::resize_scratch( size_t reduce_size , size_t thread_size ) const int rank_rev = m_map_rank[ omp_get_thread_num() ]; const int rank = pool_size - ( rank_rev + 1 ); - m_pool.at(rank_rev) = HostSpace::allocate_and_track( "openmp_scratch", alloc_size ); +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::HostSpace , void > Record ; + + Record * const r = Record::allocate( Kokkos::HostSpace() + , "openmp_scratch" + , alloc_size ); + + Record::increment( r ); + + m_pool[ rank_rev ] = reinterpret_cast( r->data() ); + +#else + + #pragma omp critical + { + m_pool.at(rank_rev) = HostSpace::allocate_and_track( "openmp_scratch", alloc_size ); + } + +#endif + new ( m_pool[ rank_rev 
] ) OpenMPexec( rank , ALLOC_EXEC , reduce_size , thread_size ); } /* END #pragma omp parallel */ @@ -248,7 +292,9 @@ void OpenMP::initialize( unsigned thread_count , // Reverse the rank for threads so that the scan operation reduces to the highest rank thread. const unsigned omp_rank = omp_get_thread_num(); - const unsigned thread_r = Impl::s_using_hwloc ? Kokkos::hwloc::bind_this_thread( thread_count , threads_coord ) : omp_rank ; + const unsigned thread_r = Impl::s_using_hwloc && Kokkos::hwloc::can_bind_threads() + ? Kokkos::hwloc::bind_this_thread( thread_count , threads_coord ) + : omp_rank ; Impl::OpenMPexec::m_map_rank[ omp_rank ] = thread_r ; } @@ -293,7 +339,7 @@ void OpenMP::finalize() omp_set_num_threads(1); - if ( Impl::s_using_hwloc ) { + if ( Impl::s_using_hwloc && Kokkos::hwloc::can_bind_threads() ) { hwloc::unbind_this_thread(); } } diff --git a/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.hpp b/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.hpp index 1ab08f648d..d0086a2432 100644 --- a/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.hpp +++ b/lib/kokkos/core/src/OpenMP/Kokkos_OpenMPexec.hpp @@ -61,6 +61,8 @@ public: enum { MAX_THREAD_COUNT = 4096 }; +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + struct Pool { Pool() : m_trackers() {} @@ -78,11 +80,21 @@ public: } }; + private: + static Pool m_pool; // Indexed by: m_pool_rank_rev + +#else + +private: + + static OpenMPexec * m_pool[ MAX_THREAD_COUNT ]; // Indexed by: m_pool_rank_rev + +#endif + static int m_pool_topo[ 4 ]; static int m_map_rank[ MAX_THREAD_COUNT ]; - static Pool m_pool; // Indexed by: m_pool_rank_rev friend class Kokkos::OpenMP ; @@ -193,12 +205,14 @@ private: inline bool team_fan_in() const { + memory_fence(); for ( int n = 1 , j ; ( ( j = m_team_rank_rev + n ) < m_team_size ) && ! 
( m_team_rank_rev & n ) ; n <<= 1 ) { m_exec.pool_rev( m_team_base_rev + j )->state_wait( Active ); } if ( m_team_rank_rev ) { m_exec.state_set( Rendezvous ); + memory_fence(); m_exec.state_wait( Rendezvous ); } @@ -208,8 +222,10 @@ private: inline void team_fan_out() const { + memory_fence(); for ( int n = 1 , j ; ( ( j = m_team_rank_rev + n ) < m_team_size ) && ! ( m_team_rank_rev & n ) ; n <<= 1 ) { m_exec.pool_rev( m_team_base_rev + j )->state_set( Active ); + memory_fence(); } } @@ -265,6 +281,7 @@ public: { return ValueType(); } #else { + memory_fence(); typedef ValueType value_type; const JoinLambdaAdapter op(op_in); #endif @@ -301,6 +318,7 @@ public: for ( int i = 1 ; i < m_team_size ; ++i ) { op.join( *team_value , *((type*) m_exec.pool_rev( m_team_base_rev + i )->scratch_thread()) ); } + memory_fence(); // The base team member may "lap" the other team members, // copy to their local value before proceeding. @@ -484,6 +502,8 @@ private: int m_team_alloc ; int m_team_iter ; + size_t m_scratch_size; + inline void init( const int league_size_request , const int team_size_request ) { @@ -511,13 +531,49 @@ public: inline int team_size() const { return m_team_size ; } inline int league_size() const { return m_league_size ; } + inline size_t scratch_size() const { return m_scratch_size ; } /** \brief Specify league size, request team size */ - TeamPolicy( execution_space & , int league_size_request , int team_size_request , int vector_length_request = 1) - { init( league_size_request , team_size_request ); (void) vector_length_request; } + TeamPolicy( execution_space & + , int league_size_request + , int team_size_request + , int /* vector_length_request */ = 1 ) + : m_scratch_size ( 0 ) + { init( league_size_request , team_size_request ); } - TeamPolicy( int league_size_request , int team_size_request , int vector_length_request = 1 ) - { init( league_size_request , team_size_request ); (void) vector_length_request; } + TeamPolicy( execution_space & + , int 
league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , int /* vector_length_request */ = 1) + : m_scratch_size ( 0 ) + { init( league_size_request , execution_space::thread_pool_size(2) ); } + + TeamPolicy( int league_size_request + , int team_size_request + , int /* vector_length_request */ = 1 ) + : m_scratch_size ( 0 ) + { init( league_size_request , team_size_request ); } + + TeamPolicy( int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , int /* vector_length_request */ = 1 ) + : m_scratch_size ( 0 ) + { init( league_size_request , execution_space::thread_pool_size(2) ); } + + template + TeamPolicy( int league_size_request + , int team_size_request + , const Experimental::TeamScratchRequest & scratch_request ) + : m_scratch_size(scratch_request.total(team_size_request)) + { init(league_size_request,team_size_request); } + + + template + TeamPolicy( int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , const Experimental::TeamScratchRequest & scratch_request ) + : m_scratch_size(scratch_request.total(execution_space::thread_pool_size(2))) + { init(league_size_request,execution_space::thread_pool_size(2)); } inline int team_alloc() const { return m_team_alloc ; } inline int team_iter() const { return m_team_iter ; } diff --git a/lib/kokkos/core/src/Qthread/Kokkos_QthreadExec.hpp b/lib/kokkos/core/src/Qthread/Kokkos_QthreadExec.hpp index d772aee2bf..e3702167ef 100644 --- a/lib/kokkos/core/src/Qthread/Kokkos_QthreadExec.hpp +++ b/lib/kokkos/core/src/Qthread/Kokkos_QthreadExec.hpp @@ -212,7 +212,7 @@ public: // Join from lower ranking to higher ranking worker. // Value at m_worker_base[n-1] is zero so skip adding it to m_worker_base[n-2]. 
- for ( int i = m_worker_size - 1 ; --i ; ) { + for ( int i = m_worker_size - 1 ; --i > 0 ; ) { ValueJoin::join( func , m_worker_base[i-1]->m_scratch_alloc , m_worker_base[i]->m_scratch_alloc ); } } diff --git a/lib/kokkos/core/src/Qthread/Kokkos_Qthread_Parallel.hpp b/lib/kokkos/core/src/Qthread/Kokkos_Qthread_Parallel.hpp index dc76a0c426..50e2a058c9 100644 --- a/lib/kokkos/core/src/Qthread/Kokkos_Qthread_Parallel.hpp +++ b/lib/kokkos/core/src/Qthread/Kokkos_Qthread_Parallel.hpp @@ -61,47 +61,50 @@ namespace Impl { //---------------------------------------------------------------------------- template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelFor< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > > +class ParallelFor< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > Policy ; - const FunctorType m_func ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + typedef typename Policy::WorkRange WorkRange ; + + const FunctorType m_functor ; const Policy m_policy ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor , const Member ibeg , const Member iend ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { + for ( Member i = ibeg ; i < iend ; ++i ) { functor( i ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( ! 
Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor , const Member ibeg , const Member iend ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i ); + const TagType t{} ; + for ( Member i = ibeg ; i < iend ; ++i ) { + functor( t , i ); } } // Function is called once by every concurrent thread. - static void execute( QthreadExec & exec , const void * arg ) + static void exec( QthreadExec & exec , const void * arg ) { const ParallelFor & self = * ((const ParallelFor *) arg ); - driver( self.m_func , typename Policy::WorkRange( self.m_policy , exec.worker_rank() , exec.worker_size() ) ); + const WorkRange range( self.m_policy, exec.worker_rank(), exec.worker_size() ); + + ParallelFor::template exec_range< WorkTag > ( self.m_functor , range.begin() , range.end() ); // All threads wait for completion. 
exec.exec_all_barrier(); @@ -109,95 +112,110 @@ private: public: - ParallelFor( const FunctorType & functor - , const Policy & policy - ) - : m_func( functor ) - , m_policy( policy ) + inline + void execute() const { - Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelFor::execute , this ); + Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelFor::exec , this ); + } + + ParallelFor( const FunctorType & arg_functor + , const Policy & arg_policy + ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + { } }; //---------------------------------------------------------------------------- template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelReduce< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > > +class ParallelReduce< FunctorType + , Kokkos::RangePolicy< Arg0, Arg1, Arg2, Kokkos::Qthread > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > Policy ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , typename Policy::work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , typename Policy::work_tag > ValueInit ; + + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + typedef typename Policy::WorkRange WorkRange ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; + const pointer_type m_result_ptr ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const PType & range ) + template< class TagType 
> + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { + for ( Member i = ibeg ; i < iend ; ++i ) { functor( i , update ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( ! Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i , update ); + const TagType t{} ; + for ( Member i = ibeg ; i < iend ; ++i ) { + functor( t , i , update ); } } - static void execute( QthreadExec & exec , const void * arg ) + static void exec( QthreadExec & exec , const void * arg ) { const ParallelReduce & self = * ((const ParallelReduce *) arg ); - driver( self.m_func - , ValueInit::init( self.m_func , exec.exec_all_reduce_value() ) - , typename Policy::WorkRange( self.m_policy , exec.worker_rank() , exec.worker_size() ) - ); + const WorkRange range( self.m_policy, exec.worker_rank(), exec.worker_size() ); - exec.template exec_all_reduce( self.m_func ); + ParallelReduce::template exec_range< WorkTag >( + self.m_functor, range.begin(), range.end(), + ValueInit::init( self.m_functor , exec.exec_all_reduce_value() ) ); + + exec.template exec_all_reduce( self.m_functor ); } public: - template< class HostViewType > - ParallelReduce( const FunctorType & functor - , const 
Policy & policy - , const HostViewType & result_view ) - : m_func( functor ) - , m_policy( policy ) + inline + void execute() const { - QthreadExec::resize_worker_scratch( ValueTraits::value_size( m_func ) , 0 ); - - Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelReduce::execute , this ); + QthreadExec::resize_worker_scratch( ValueTraits::value_size( m_functor ) , 0 ); + Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelReduce::exec , this ); const pointer_type data = (pointer_type) QthreadExec::exec_all_reduce_result(); - Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( m_func , data ); + Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( m_functor , data ); - if ( result_view.ptr_on_device() ) { - const unsigned n = ValueTraits::value_count( m_func ); - for ( unsigned i = 0 ; i < n ; ++i ) { result_view.ptr_on_device()[i] = data[i]; } + if ( m_result_ptr ) { + const unsigned n = ValueTraits::value_count( m_functor ); + for ( unsigned i = 0 ; i < n ; ++i ) { m_result_ptr[i] = data[i]; } } } + + template< class HostViewType > + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const HostViewType & arg_result_view ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result_view.ptr_on_device() ) + { } }; //---------------------------------------------------------------------------- @@ -208,50 +226,63 @@ class ParallelFor< FunctorType , TeamPolicy< Arg0 , Arg1 , Kokkos::Qthread > > private: typedef TeamPolicy< Arg0 , Arg1 , Kokkos::Qthread > Policy ; + typedef typename Policy::member_type Member ; + typedef typename Policy::work_tag WorkTag ; - const FunctorType m_func ; - const Policy m_team ; + const FunctorType m_functor ; + const Policy m_policy ; template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const typename Policy::member_type & 
>::type member ) const - { m_func( member ); } + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member ) + { + while ( member ) { + functor( member ); + member.team_barrier(); + member.next_team(); + } + } template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member ) const - { m_func( TagType() , member ); } + inline static + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member ) + { + const TagType t{} ; + while ( member ) { + functor( t , member ); + member.team_barrier(); + member.next_team(); + } + } - static void execute( QthreadExec & exec , const void * arg ) + static void exec( QthreadExec & exec , const void * arg ) { const ParallelFor & self = * ((const ParallelFor *) arg ); - typename Policy::member_type member( exec , self.m_team ); - - while ( member ) { - self.ParallelFor::template driver< typename Policy::work_tag >( member ); - member.team_barrier(); - member.next_team(); - } + ParallelFor::template exec_team< WorkTag > + ( self.m_functor , Member( exec , self.m_policy ) ); exec.exec_all_barrier(); } public: - ParallelFor( const FunctorType & functor , - const Policy & policy ) - : m_func( functor ) - , m_team( policy ) + inline + void execute() const { QthreadExec::resize_worker_scratch ( /* reduction memory */ 0 - , /* team shared memory */ FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ); - - Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelFor::execute , this ); + , /* team shared memory */ FunctorTeamShmemSize< FunctorType >::value( m_functor , m_policy.team_size() ) ); + Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelFor::exec , this ); } + + ParallelFor( const FunctorType & arg_functor , + 
const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + { } }; //---------------------------------------------------------------------------- @@ -263,148 +294,170 @@ private: typedef TeamPolicy< Arg0 , Arg1 , Kokkos::Qthread > Policy ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , typename Policy::work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , typename Policy::work_tag > ValueInit ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - const FunctorType m_func ; - const Policy m_team ; + const FunctorType m_functor ; + const Policy m_policy ; + const pointer_type m_result_ptr ; template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member - , reference_type update ) const - { m_func( member , update ); } + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member , reference_type update ) + { + while ( member ) { + functor( member , update ); + member.team_barrier(); + member.next_team(); + } + } template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member - , reference_type update ) const - { m_func( TagType() , member , update ); } + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member , reference_type update ) + { + const TagType t{} ; + while ( member ) { + functor( t , member , update ); + member.team_barrier(); + member.next_team(); + } + } - static void execute( QthreadExec & exec , const void * arg ) + static void exec( QthreadExec & exec , const void * arg ) { const ParallelReduce & self = * ((const ParallelReduce *) arg ); - // Initialize thread-local value - reference_type update = ValueInit::init( self.m_func , exec.exec_all_reduce_value() ); + ParallelReduce::template exec_team< WorkTag > + ( self.m_functor + , Member( exec , self.m_policy ) + , ValueInit::init( self.m_functor , exec.exec_all_reduce_value() ) ); - typename Policy::member_type member( exec , self.m_team ); - - while ( member ) { - self.ParallelReduce::template driver< typename Policy::work_tag >( member , update ); - member.team_barrier(); - member.next_team(); - } - - exec.template exec_all_reduce< FunctorType , typename Policy::work_tag >( self.m_func ); + exec.template exec_all_reduce< FunctorType , WorkTag >( self.m_functor ); } public: - template< class ViewType > - ParallelReduce( const FunctorType & functor , - const Policy & policy , - const ViewType & result ) - : m_func( functor ) - , m_team( policy ) + inline + void execute() const { QthreadExec::resize_worker_scratch - ( /* reduction memory */ ValueTraits::value_size( functor ) - , /* team shared memory */ FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ); + ( /* reduction memory */ ValueTraits::value_size( m_functor ) + , /* team shared memory */ FunctorTeamShmemSize< FunctorType >::value( m_functor , m_policy.team_size() ) ); - Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelReduce::execute , this ); + Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelReduce::exec , this ); const pointer_type data = (pointer_type) QthreadExec::exec_all_reduce_result(); - 
Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( m_func , data ); + Kokkos::Impl::FunctorFinal< FunctorType , typename Policy::work_tag >::final( m_functor , data ); - const unsigned n = ValueTraits::value_count( m_func ); - for ( unsigned i = 0 ; i < n ; ++i ) { result.ptr_on_device()[i] = data[i]; } + if ( m_result_ptr ) { + const unsigned n = ValueTraits::value_count( m_functor ); + for ( unsigned i = 0 ; i < n ; ++i ) { m_result_ptr[i] = data[i]; } + } } + + template< class ViewType > + ParallelReduce( const FunctorType & arg_functor , + const Policy & arg_policy , + const ViewType & arg_result ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result.ptr_on_device() ) + { } }; //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelScan< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > > +class ParallelScan< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Qthread > Policy ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , typename Policy::work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , typename Policy::work_tag > ValueInit ; + + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + typedef typename Policy::WorkRange WorkRange ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; - template< class PType > - 
KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const bool final - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update , const bool final ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { + for ( Member i = ibeg ; i < iend ; ++i ) { functor( i , update , final ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( ! Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const bool final - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend + , reference_type update , const bool final ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i , update , final ); + const TagType t{} ; + for ( Member i = ibeg ; i < iend ; ++i ) { + functor( t , i , update , final ); } } - static void execute( QthreadExec & exec , const void * arg ) + static void exec( QthreadExec & exec , const void * arg ) { const ParallelScan & self = * ((const ParallelScan *) arg ); - const typename Policy::WorkRange range( self.m_policy , exec.worker_rank() , exec.worker_size() ); + const WorkRange range( self.m_policy , exec.worker_rank() , exec.worker_size() ); // Initialize thread-local value - reference_type update = ValueInit::init( self.m_func , exec.exec_all_reduce_value() ); + reference_type update = ValueInit::init( self.m_functor , exec.exec_all_reduce_value() ); - driver( self.m_func , update , false , range ); + ParallelScan::template exec_range< WorkTag >( self.m_functor, range.begin() , range.end() , update , false ); - exec.template exec_all_scan< FunctorType , typename Policy::work_tag >( self.m_func ); + exec.template exec_all_scan< FunctorType , typename Policy::work_tag >( self.m_functor ); - driver( self.m_func , update , true , range ); + ParallelScan::template exec_range< WorkTag >( self.m_functor , range.begin() , range.end() , update , true ); exec.exec_all_barrier(); } public: - ParallelScan( const FunctorType & functor - , const Policy & policy - ) - : m_func( functor ) - , m_policy( policy ) + inline + void execute() const { - QthreadExec::resize_worker_scratch( ValueTraits::value_size( m_func ) , 0 ); + QthreadExec::resize_worker_scratch( ValueTraits::value_size( m_functor ) , 0 ); + Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelScan::exec , this ); + } 
- Impl::QthreadExec::exec_all( Qthread::instance() , & ParallelScan::execute , this ); + ParallelScan( const FunctorType & arg_functor + , const Policy & arg_policy + ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + { } }; diff --git a/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.cpp b/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.cpp index 9de9748de9..4f0ad49fa2 100644 --- a/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.cpp +++ b/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.cpp @@ -255,6 +255,56 @@ void Task::assign( Task ** const lhs , Task * rhs , const bool no_throw ) //---------------------------------------------------------------------------- +void Task::closeout() +{ + enum { RESPAWN = int( Kokkos::Experimental::TASK_STATE_WAITING ) | + int( Kokkos::Experimental::TASK_STATE_EXECUTING ) }; + +#if 0 +fprintf( stdout + , "worker(%d.%d) task 0x%.12lx %s\n" + , qthread_shep() + , qthread_worker_local(NULL) + , reinterpret_cast(this) + , ( m_state == RESPAWN ? "respawn" : "complete" ) + ); +fflush(stdout); +#endif + + // When dependent tasks run there would be a race + // condition between destroying this task and + // querying the active count pointer from this task. + int volatile * const active_count = m_active_count ; + + if ( m_state == RESPAWN ) { + // Task requests respawn, set state to waiting and reschedule the task + m_state = Kokkos::Experimental::TASK_STATE_WAITING ; + schedule(); + } + else { + + // Task did not respawn, is complete + m_state = Kokkos::Experimental::TASK_STATE_COMPLETE ; + + // Release dependences before allowing dependent tasks to run. + // Otherwise there is a thread race condition for removing dependences. + for ( int i = 0 ; i < m_dep_size ; ++i ) { + assign( & m_dep[i] , 0 ); + } + + // Set qthread FEB to full so that dependent tasks are allowed to execute. + // This 'task' may be deleted immediately following this function call. 
+ qthread_fill( & m_qfeb ); + + // The dependent task could now complete and destroy 'this' task + // before the call to 'qthread_fill' returns. Therefore, for + // thread safety assume that 'this' task has now been destroyed. + } + + // Decrement active task count before returning. + Kokkos::atomic_decrement( active_count ); +} + aligned_t Task::qthread_func( void * arg ) { Task * const task = reinterpret_cast< Task * >(arg); @@ -291,62 +341,18 @@ fflush(stdout); #endif member.team_barrier(); - - close_out = member.team_rank() == 0 ; + if ( member.team_rank() == 0 ) task->closeout(); + member.team_barrier(); } else if ( task->m_apply_team && task->m_apply_single == reinterpret_cast(1) ) { // Team hard-wired to one, no cloning Kokkos::Impl::QthreadTeamPolicyMember member ; (*task->m_apply_team)( task , member ); - close_out = true ; + task->closeout(); } else { (*task->m_apply_single)( task ); - - close_out = true ; - } - - if ( close_out ) { - - // When dependent tasks run there would be a race - // condition between destroying this task and - // querying the active count pointer from this task. - int volatile * active_count = task->m_active_count ; - - if ( task->m_state == ( Kokkos::Experimental::TASK_STATE_WAITING | Kokkos::Experimental::TASK_STATE_EXECUTING ) ) { - -#if 0 -fprintf( stdout - , "worker(%d.%d) task 0x%.12lx respawn\n" - , qthread_shep() - , qthread_worker_local(NULL) - , reinterpret_cast(task) - ); -fflush(stdout); -#endif - - // Task respawned, set state to waiting and reschedule the task - task->m_state = Kokkos::Experimental::TASK_STATE_WAITING ; - task->schedule(); - } - else { - - // Task did not respawn, is complete - task->m_state = Kokkos::Experimental::TASK_STATE_COMPLETE ; - - // Release dependences before allowing dependent tasks to run. - // Otherwise there is a thread race condition for removing dependences. 
- for ( int i = 0 ; i < task->m_dep_size ; ++i ) { - assign( & task->m_dep[i] , 0 ); - } - - // Set qthread FEB to full so that dependent tasks are allowed to execute. - // This 'task' may be deleted immediately following this function call. - qthread_fill( & task->m_qfeb ); - } - - // Decrement active task count before returning. - Kokkos::atomic_decrement( active_count ); + task->closeout(); } #if 0 @@ -419,8 +425,7 @@ fflush(stdout); , NULL , m_dep_size , qprecon /* dependences */ , spawn_shepherd - // , unsigned( QTHREAD_SPAWN_SIMPLE | QTHREAD_SPAWN_LOCAL_PRIORITY ) - , unsigned( QTHREAD_SPAWN_LOCAL_PRIORITY ) + , unsigned( QTHREAD_SPAWN_SIMPLE | QTHREAD_SPAWN_LOCAL_PRIORITY ) , num_worker_per_shepherd - 1 ); } diff --git a/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.hpp b/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.hpp index 1f4a622ebe..9ff27de373 100644 --- a/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.hpp +++ b/lib/kokkos/core/src/Qthread/Kokkos_Qthread_TaskPolicy.hpp @@ -121,6 +121,7 @@ private: } void schedule(); + void closeout(); protected : @@ -490,7 +491,7 @@ public: KOKKOS_INLINE_FUNCTION TaskPolicy( const TaskPolicy & rhs ) : m_default_dependence_capacity( rhs.m_default_dependence_capacity ) - , m_team_size( m_team_size ) + , m_team_size( rhs.m_team_size ) , m_active_count_root(0) , m_active_count( rhs.m_active_count ) {} @@ -499,7 +500,7 @@ public: TaskPolicy( const TaskPolicy & rhs , const unsigned arg_default_dependence_capacity ) : m_default_dependence_capacity( arg_default_dependence_capacity ) - , m_team_size( m_team_size ) + , m_team_size( rhs.m_team_size ) , m_active_count_root(0) , m_active_count( rhs.m_active_count ) {} diff --git a/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.cpp b/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.cpp index 99553fccb1..078cc658bf 100644 --- a/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.cpp +++ b/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.cpp @@ -1,13 +1,13 @@ /* 
//@HEADER // ************************************************************************ -// +// // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation -// +// // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. -// +// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -36,7 +36,7 @@ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) -// +// // ************************************************************************ //@HEADER */ @@ -50,9 +50,7 @@ #include #include #include -#include -#include -#include +#include #include @@ -135,7 +133,11 @@ void ThreadsExec::driver(void) ThreadsExec::ThreadsExec() : m_pool_base(0) +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) , m_scratch() +#else + , m_scratch(0) +#endif , m_scratch_reduce_end(0) , m_scratch_thread_end(0) , m_numa_rank(0) @@ -194,8 +196,25 @@ ThreadsExec::~ThreadsExec() { const unsigned entry = m_pool_size - ( m_pool_rank + 1 ); - m_pool_base = 0 ; +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::HostSpace , void > Record ; + + if ( m_scratch ) { + Record * const r = Record::get_record( m_scratch ); + + m_scratch = 0 ; + + Record::decrement( r ); + } + +#else + m_scratch.clear(); + +#endif + + m_pool_base = 0 ; m_scratch_reduce_end = 0 ; m_scratch_thread_end = 0 ; m_numa_rank = 0 ; @@ -303,6 +322,10 @@ void ThreadsExec::fence() s_current_function = 0 ; s_current_function_arg = 0 ; + + // Make sure function and arguments are cleared before + // potentially re-activating threads with a subsequent launch. 
+ memory_fence(); } /** \brief Begin execution of the asynchronous functor */ @@ -317,6 +340,9 @@ void ThreadsExec::start( void (*func)( ThreadsExec & , const void * ) , const vo s_current_function = func ; s_current_function_arg = arg ; + // Make sure function and arguments are written before activating threads. + memory_fence(); + // Activate threads: for ( int i = s_thread_pool_size[0] ; 0 < i-- ; ) { s_threads_exec[i]->m_pool_state = ThreadsExec::Active ; @@ -376,6 +402,9 @@ void ThreadsExec::execute_serial( void (*func)( ThreadsExec & , const void * ) ) s_current_function = func ; s_current_function_arg = & s_threads_process ; + // Make sure function and arguments are written before activating threads. + memory_fence(); + const unsigned begin = s_threads_process.m_pool_base ? 1 : 0 ; for ( unsigned i = s_thread_pool_size[0] ; begin < i ; ) { @@ -394,6 +423,9 @@ void ThreadsExec::execute_serial( void (*func)( ThreadsExec & , const void * ) ) s_current_function_arg = 0 ; s_current_function = 0 ; + + // Make sure function and arguments are cleared before proceeding. 
+ memory_fence(); } //---------------------------------------------------------------------------- @@ -405,17 +437,51 @@ void * ThreadsExec::root_reduce_scratch() void ThreadsExec::execute_resize_scratch( ThreadsExec & exec , const void * ) { +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + + typedef Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::HostSpace , void > Record ; + + if ( exec.m_scratch ) { + Record * const r = Record::get_record( exec.m_scratch ); + + exec.m_scratch = 0 ; + + Record::decrement( r ); + } + +#else + exec.m_scratch.clear(); +#endif + exec.m_scratch_reduce_end = s_threads_process.m_scratch_reduce_end ; exec.m_scratch_thread_end = s_threads_process.m_scratch_thread_end ; if ( s_threads_process.m_scratch_thread_end ) { +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + + // Allocate tracked memory: + { + Record * const r = Record::allocate( Kokkos::HostSpace() , "thread_scratch" , s_threads_process.m_scratch_thread_end ); + + Record::increment( r ); + + exec.m_scratch = r->data(); + } + + unsigned * ptr = reinterpret_cast( exec.m_scratch ); + +#else + exec.m_scratch = HostSpace::allocate_and_track( "thread_scratch" , s_threads_process.m_scratch_thread_end ); unsigned * ptr = reinterpret_cast( exec.m_scratch.alloc_ptr() ); + +#endif + unsigned * const end = ptr + s_threads_process.m_scratch_thread_end / sizeof(unsigned); // touch on this thread @@ -452,7 +518,11 @@ void * ThreadsExec::resize_scratch( size_t reduce_size , size_t thread_size ) s_threads_process.m_scratch = s_threads_exec[0]->m_scratch ; } +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + return s_threads_process.m_scratch ; +#else return s_threads_process.m_scratch.alloc_ptr() ; +#endif } //---------------------------------------------------------------------------- @@ -550,7 +620,8 @@ void ThreadsExec::initialize( unsigned thread_count , // then they will be given default values based upon hwloc detection // and allowed asynchronous execution. 
- const bool hwloc_avail = hwloc::available(); + const bool hwloc_avail = Kokkos::hwloc::available(); + const bool hwloc_can_bind = hwloc_avail && Kokkos::hwloc::can_bind_threads(); if ( thread_count == 0 ) { thread_count = hwloc_avail @@ -588,7 +659,11 @@ void ThreadsExec::initialize( unsigned thread_count , // If hwloc available then spawned thread will // choose its own entry in 's_threads_coord' // otherwise specify the entry. - s_current_function_arg = (void*)static_cast( hwloc_avail ? ~0u : ith ); + s_current_function_arg = (void*)static_cast( hwloc_can_bind ? ~0u : ith ); + + // Make sure all outstanding memory writes are complete + // before spawning the new thread. + memory_fence(); // Spawn thread executing the 'driver()' function. // Wait until spawned thread has attempted to initialize. @@ -617,9 +692,13 @@ void ThreadsExec::initialize( unsigned thread_count , s_current_function_arg = 0 ; s_threads_process.m_pool_state = ThreadsExec::Inactive ; + memory_fence(); + if ( ! thread_spawn_failed ) { // Bind process to the core on which it was located before spawning occured - Kokkos::hwloc::bind_this_thread( proc_coord ); + if (hwloc_can_bind) { + Kokkos::hwloc::bind_this_thread( proc_coord ); + } if ( thread_spawn_begin ) { // Include process in pool. const std::pair coord = Kokkos::hwloc::get_this_thread_coordinate(); @@ -702,7 +781,9 @@ void ThreadsExec::finalize() s_threads_exec[0] = 0 ; } - Kokkos::hwloc::unbind_this_thread(); + if (Kokkos::hwloc::can_bind_threads() ) { + Kokkos::hwloc::unbind_this_thread(); + } s_thread_pool_size[0] = 0 ; s_thread_pool_size[1] = 0 ; diff --git a/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.hpp b/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.hpp index 3820697977..684eac8b7d 100644 --- a/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.hpp +++ b/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec.hpp @@ -89,7 +89,11 @@ private: ThreadsExec * const * m_pool_base ; ///< Base for pool fan-in +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) Impl::AllocationTracker m_scratch ; +#else + void * m_scratch ; +#endif int m_scratch_reduce_end ; int m_scratch_thread_end ; int m_numa_rank ; @@ -122,9 +126,19 @@ public: static int get_thread_count(); static ThreadsExec * get_thread( const int init_thread_rank ); +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + inline void * reduce_memory() const { return reinterpret_cast(m_scratch.alloc_ptr()); } KOKKOS_INLINE_FUNCTION void * scratch_memory() const { return reinterpret_cast(m_scratch.alloc_ptr()) + m_scratch_reduce_end ; } +#else + + inline void * reduce_memory() const { return m_scratch ; } + KOKKOS_INLINE_FUNCTION void * scratch_memory() const + { return reinterpret_cast(m_scratch) + m_scratch_reduce_end ; } + +#endif + KOKKOS_INLINE_FUNCTION int volatile & state() { return m_pool_state ; } KOKKOS_INLINE_FUNCTION ThreadsExec * const * pool_base() const { return m_pool_base ; } diff --git a/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec_base.cpp b/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec_base.cpp index 40d5efd0fe..ce09248678 100644 --- a/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec_base.cpp +++ b/lib/kokkos/core/src/Threads/Kokkos_ThreadsExec_base.cpp @@ -155,6 +155,7 @@ void ThreadsExec::wait_yield( volatile int & flag , const int value ) #elif defined( KOKKOS_HAVE_WINTHREAD ) /* Windows libraries */ +#include #include #include diff --git a/lib/kokkos/core/src/Threads/Kokkos_ThreadsTeam.hpp b/lib/kokkos/core/src/Threads/Kokkos_ThreadsTeam.hpp index 53b5eb01df..b69d72d78c 100644 --- a/lib/kokkos/core/src/Threads/Kokkos_ThreadsTeam.hpp +++ b/lib/kokkos/core/src/Threads/Kokkos_ThreadsTeam.hpp @@ -423,6 +423,8 @@ private: int m_team_size ; int m_team_alloc ; + size_t m_scratch_size; + inline void init( const int league_size_request , const int team_size_request ) @@ -477,19 +479,68 @@ public: inline int team_size() const { return m_team_size ; } inline int team_alloc() const { return m_team_alloc ; } inline int 
league_size() const { return m_league_size ; } + inline size_t scratch_size() const { return m_scratch_size ; } /** \brief Specify league size, request team size */ - TeamPolicy( execution_space & , int league_size_request , int team_size_request , int vector_length_request = 1 ) + TeamPolicy( execution_space & + , int league_size_request + , int team_size_request + , int vector_length_request = 1 ) : m_league_size(0) , m_team_size(0) , m_team_alloc(0) + , m_scratch_size ( 0 ) { init(league_size_request,team_size_request); (void) vector_length_request; } - TeamPolicy( int league_size_request , int team_size_request , int vector_length_request = 1 ) + /** \brief Specify league size, request team size */ + TeamPolicy( execution_space & + , int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , int /* vector_length_request */ = 1 ) : m_league_size(0) , m_team_size(0) , m_team_alloc(0) - { init(league_size_request,team_size_request); (void) vector_length_request; } + , m_scratch_size ( 0 ) + { init(league_size_request,execution_space::thread_pool_size(2)); } + + TeamPolicy( int league_size_request + , int team_size_request + , int /* vector_length_request */ = 1 ) + : m_league_size(0) + , m_team_size(0) + , m_team_alloc(0) + , m_scratch_size ( 0 ) + { init(league_size_request,team_size_request); } + + TeamPolicy( int league_size_request + , const Kokkos::AUTO_t & /* team_size_request */ + , int /* vector_length_request */ = 1 ) + : m_league_size(0) + , m_team_size(0) + , m_team_alloc(0) + , m_scratch_size ( 0 ) + { init(league_size_request,execution_space::thread_pool_size(2)); } + + template + TeamPolicy( int league_size_request + , int team_size_request + , const Experimental::TeamScratchRequest & scratch_request ) + : m_league_size(0) + , m_team_size(0) + , m_team_alloc(0) + , m_scratch_size(scratch_request.total(team_size_request)) + { init(league_size_request,team_size_request); } + + + template + TeamPolicy( int league_size_request + , 
const Kokkos::AUTO_t & /* team_size_request */ + , const Experimental::TeamScratchRequest & scratch_request ) + : m_league_size(0) + , m_team_size(0) + , m_team_alloc(0) + , m_scratch_size(scratch_request.total(execution_space::thread_pool_size(2))) + { init(league_size_request,execution_space::thread_pool_size(2)); } typedef Impl::ThreadsExecTeamMember member_type ; diff --git a/lib/kokkos/core/src/Threads/Kokkos_Threads_Parallel.hpp b/lib/kokkos/core/src/Threads/Kokkos_Threads_Parallel.hpp index 4b2a169126..9e3b0acd3d 100644 --- a/lib/kokkos/core/src/Threads/Kokkos_Threads_Parallel.hpp +++ b/lib/kokkos/core/src/Threads/Kokkos_Threads_Parallel.hpp @@ -45,6 +45,7 @@ #define KOKKOS_THREADS_PARALLEL_HPP #include +#include #include @@ -58,363 +59,440 @@ namespace Impl { //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +/* ParallelFor Kokkos::Threads with RangePolicy */ template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelFor< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Threads > > +class ParallelFor< FunctorType + , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Threads > + > { private: typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Threads > Policy ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::member_type Member ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member 
iend ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { + #if defined( KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION ) && \ + defined( KOKKOS_HAVE_PRAGMA_IVDEP ) + #pragma ivdep + #endif + for ( Member i = ibeg ; i < iend ; ++i ) { functor( i ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( ! Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member ibeg , const Member iend ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i ); + const TagType t{} ; + #if defined( KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION ) && \ + defined( KOKKOS_HAVE_PRAGMA_IVDEP ) + #pragma ivdep + #endif + for ( Member i = ibeg ; i < iend ; ++i ) { + functor( t , i ); } } - static void execute( ThreadsExec & exec , const void * arg ) + static void exec( ThreadsExec & exec , const void * arg ) { const ParallelFor & self = * ((const ParallelFor *) arg ); - driver( self.m_func , typename Policy::WorkRange( self.m_policy , exec.pool_rank() , exec.pool_size() ) ); + WorkRange range( self.m_policy , exec.pool_rank() , exec.pool_size() ); + + ParallelFor::template exec_range< WorkTag > + ( self.m_functor , range.begin() , range.end() ); exec.fan_in(); } public: - ParallelFor( const FunctorType & functor - , const Policy & policy ) - : m_func( functor ) - , m_policy( policy ) + inline + void execute() const { - ThreadsExec::start( & ParallelFor::execute , this ); - + ThreadsExec::start( & ParallelFor::exec , this ); ThreadsExec::fence(); } + + ParallelFor( const FunctorType & arg_functor + , const Policy & 
arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + {} }; +//---------------------------------------------------------------------------- +/* ParallelFor Kokkos::Threads with TeamPolicy */ + template< class FunctorType , class Arg0 , class Arg1 > -class ParallelFor< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > > +class ParallelFor< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > + > { private: typedef TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > Policy ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; const int m_shared ; template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member ) const - { m_func( member ); } + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member ) + { + for ( ; member.valid() ; member.next() ) { + functor( member ); + } + } template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member ) const - { m_func( TagType() , member ); } + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member ) + { + const TagType t{} ; + for ( ; member.valid() ; member.next() ) { + functor( t , member ); + } + } - static void execute( ThreadsExec & exec , const void * arg ) + static void exec( ThreadsExec & exec , const void * arg ) { const ParallelFor & self = * ((const ParallelFor *) arg ); - typename Policy::member_type member( & exec , self.m_policy , self.m_shared ); - - for ( ; member.valid() ; member.next() ) { - self.ParallelFor::template driver< typename Policy::work_tag >( member ); - } + ParallelFor::exec_team< WorkTag > + ( self.m_functor , Member( & exec , self.m_policy , self.m_shared ) ); exec.fan_in(); } public: - ParallelFor( const FunctorType & functor - , const Policy & policy ) - : m_func( functor ) - , m_policy( policy ) - , m_shared( FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ) + inline + void execute() const { ThreadsExec::resize_scratch( 0 , Policy::member_type::team_reduce_size() + m_shared ); - ThreadsExec::start( & ParallelFor::execute , this ); + ThreadsExec::start( & ParallelFor::exec , this ); ThreadsExec::fence(); } + + ParallelFor( const FunctorType & arg_functor + , const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_shared( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( arg_functor , arg_policy.team_size() ) ) + { } }; - - //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +/* ParallelReduce with Kokkos::Threads and RangePolicy */ template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelReduce< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Threads > > +class ParallelReduce< FunctorType + , Kokkos::RangePolicy< Arg0, Arg1, Arg2, Kokkos::Threads > + > { private: - typedef Kokkos::RangePolicy< 
Arg0 , Arg1 , Arg2 , Kokkos::Threads > Policy ; - typedef typename Policy::work_tag work_tag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , work_tag > ValueInit ; + typedef Kokkos::RangePolicy< Arg0 , Arg1, Arg2, Kokkos::Threads > Policy ; + + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::member_type Member ; + + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; + const pointer_type m_result_ptr ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member & ibeg , const Member & iend + , reference_type update ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { + #if defined( KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION ) && \ + defined( KOKKOS_HAVE_PRAGMA_IVDEP ) + #pragma ivdep + #endif + for ( Member i = ibeg ; i < iend ; ++i ) { functor( i , update ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( ! 
Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member & ibeg , const Member & iend + , reference_type update ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i , update ); + const TagType t{} ; + #if defined( KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION ) && \ + defined( KOKKOS_HAVE_PRAGMA_IVDEP ) + #pragma ivdep + #endif + for ( Member i = ibeg ; i < iend ; ++i ) { + functor( t , i , update ); } } - static void execute( ThreadsExec & exec , const void * arg ) + static void exec( ThreadsExec & exec , const void * arg ) { const ParallelReduce & self = * ((const ParallelReduce *) arg ); + const WorkRange range( self.m_policy, exec.pool_rank(), exec.pool_size() ); - driver( self.m_func - , ValueInit::init( self.m_func , exec.reduce_memory() ) - , typename Policy::WorkRange( self.m_policy , exec.pool_rank() , exec.pool_size() ) - ); + ParallelReduce::template exec_range< WorkTag > + ( self.m_functor , range.begin() , range.end() + , ValueInit::init( self.m_functor , exec.reduce_memory() ) ); - exec.template fan_in_reduce< FunctorType , work_tag >( self.m_func ); + exec.template fan_in_reduce< FunctorType , WorkTag >( self.m_functor ); } public: - template< class HostViewType > - ParallelReduce( const FunctorType & functor , - const Policy & policy , - const HostViewType & result_view ) - : m_func( functor ) - , m_policy( policy ) + inline + void execute() const { - ThreadsExec::resize_scratch( ValueTraits::value_size( m_func ) , 0 ); + ThreadsExec::resize_scratch( ValueTraits::value_size( m_functor ) , 0 ); - ThreadsExec::start( & ParallelReduce::execute , this ); - - const 
pointer_type data = (pointer_type) ThreadsExec::root_reduce_scratch(); + ThreadsExec::start( & ParallelReduce::exec , this ); ThreadsExec::fence(); - if ( result_view.ptr_on_device() ) { - const unsigned n = ValueTraits::value_count( m_func ); - for ( unsigned i = 0 ; i < n ; ++i ) { result_view.ptr_on_device()[i] = data[i]; } + if ( m_result_ptr ) { + + const pointer_type data = + (pointer_type) ThreadsExec::root_reduce_scratch(); + + const unsigned n = ValueTraits::value_count( m_functor ); + for ( unsigned i = 0 ; i < n ; ++i ) { m_result_ptr[i] = data[i]; } } } + + template< class HostViewType > + ParallelReduce( const FunctorType & arg_functor , + const Policy & arg_policy , + const HostViewType & arg_result_view ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result_view.ptr_on_device() ) + { + static_assert( Kokkos::is_view< HostViewType >::value + , "Kokkos::Threads reduce result must be a View" ); + + static_assert( std::is_same< typename HostViewType::memory_space , HostSpace >::value + , "Kokkos::Threads reduce result must be a View in HostSpace" ); + } }; //---------------------------------------------------------------------------- +/* ParallelReduce with Kokkos::Threads and TeamPolicy */ template< class FunctorType , class Arg0 , class Arg1 > -class ParallelReduce< FunctorType , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > > +class ParallelReduce< FunctorType + , Kokkos::TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > + > { private: - typedef TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > Policy ; - typedef typename Policy::work_tag work_tag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , work_tag > ValueInit ; + typedef TeamPolicy< Arg0 , Arg1 , Kokkos::Threads > Policy ; + typedef typename Policy::work_tag WorkTag ; + typedef typename Policy::member_type Member ; + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag 
> ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; + const pointer_type m_result_ptr ; const int m_shared ; template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member - , reference_type update ) const - { m_func( member , update ); } + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member , reference_type update ) + { + for ( ; member.valid() ; member.next() ) { + functor( member , update ); + } + } template< class TagType > - KOKKOS_FORCEINLINE_FUNCTION - void driver( typename Impl::enable_if< ! Impl::is_same< TagType , void >::value , - const typename Policy::member_type & >::type member - , reference_type update ) const - { m_func( TagType() , member , update ); } + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_team( const FunctorType & functor , Member member , reference_type update ) + { + const TagType t{} ; + for ( ; member.valid() ; member.next() ) { + functor( t , member , update ); + } + } - static void execute( ThreadsExec & exec , const void * arg ) + static void exec( ThreadsExec & exec , const void * arg ) { const ParallelReduce & self = * ((const ParallelReduce *) arg ); - // Initialize thread-local value - reference_type update = ValueInit::init( self.m_func , exec.reduce_memory() ); + ParallelReduce::template exec_team< WorkTag > + ( self.m_functor , Member( & exec , self.m_policy , self.m_shared ) + , ValueInit::init( self.m_functor , exec.reduce_memory() ) ); - typename Policy::member_type member( & exec , self.m_policy , self.m_shared ); - for ( ; member.valid() ; member.next() ) { - self.ParallelReduce::template driver< work_tag >( member , update ); - } - - exec.template fan_in_reduce< FunctorType , work_tag >( self.m_func ); + exec.template fan_in_reduce< FunctorType , WorkTag >( self.m_functor ); } public: - ParallelReduce( const FunctorType & functor - , const Policy & policy ) - : m_func( functor ) - , m_policy( policy ) - , m_shared( FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ) + inline + void execute() const { - ThreadsExec::resize_scratch( ValueTraits::value_size( m_func ) , Policy::member_type::team_reduce_size() + m_shared ); + ThreadsExec::resize_scratch( ValueTraits::value_size( m_functor ) , Policy::member_type::team_reduce_size() + m_shared ); - ThreadsExec::start( & ParallelReduce::execute , this ); + ThreadsExec::start( & ParallelReduce::exec , this ); ThreadsExec::fence(); + + if ( m_result_ptr ) { + + const pointer_type data = (pointer_type) ThreadsExec::root_reduce_scratch(); + + const unsigned n = ValueTraits::value_count( m_functor ); + for ( unsigned i = 0 ; i < n ; ++i ) { m_result_ptr[i] = data[i]; } + } } template< class ViewType > - 
ParallelReduce( const FunctorType & functor - , const Policy & policy - , const ViewType & result ) - : m_func( functor ) - , m_policy( policy ) - , m_shared( FunctorTeamShmemSize< FunctorType >::value( functor , policy.team_size() ) ) - { - ThreadsExec::resize_scratch( ValueTraits::value_size( m_func ) , Policy::member_type::team_reduce_size() + m_shared ); - - ThreadsExec::start( & ParallelReduce::execute , this ); - - const pointer_type data = (pointer_type) ThreadsExec::root_reduce_scratch(); - - ThreadsExec::fence(); - - const unsigned n = ValueTraits::value_count( m_func ); - for ( unsigned i = 0 ; i < n ; ++i ) { result.ptr_on_device()[i] = data[i]; } - } + ParallelReduce( const FunctorType & arg_functor + , const Policy & arg_policy + , const ViewType & arg_result ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + , m_result_ptr( arg_result.ptr_on_device() ) + , m_shared( arg_policy.scratch_size() + FunctorTeamShmemSize< FunctorType >::value( arg_functor , arg_policy.team_size() ) ) + { } }; //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +/* ParallelScan with Kokkos::Threads and RangePolicy */ template< class FunctorType , class Arg0 , class Arg1 , class Arg2 > -class ParallelScan< FunctorType , Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Threads > > +class ParallelScan< FunctorType + , Kokkos::RangePolicy< Arg0, Arg1, Arg2, Kokkos::Threads > + > { private: - typedef Kokkos::RangePolicy< Arg0 , Arg1 , Arg2 , Kokkos::Threads > Policy ; - typedef typename Policy::work_tag work_tag ; - typedef Kokkos::Impl::FunctorValueTraits< FunctorType , work_tag > ValueTraits ; - typedef Kokkos::Impl::FunctorValueInit< FunctorType , work_tag > ValueInit ; + typedef Kokkos::RangePolicy< Arg0, Arg1, Arg2, Kokkos::Threads > Policy ; + typedef typename Policy::WorkRange WorkRange ; + typedef typename Policy::work_tag WorkTag ; + typedef typename 
Policy::member_type Member ; + typedef Kokkos::Impl::FunctorValueTraits< FunctorType, WorkTag > ValueTraits ; + typedef Kokkos::Impl::FunctorValueInit< FunctorType, WorkTag > ValueInit ; typedef typename ValueTraits::pointer_type pointer_type ; typedef typename ValueTraits::reference_type reference_type ; - const FunctorType m_func ; + const FunctorType m_functor ; const Policy m_policy ; - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const bool final - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member & ibeg , const Member & iend + , reference_type update , const bool final ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { + #if defined( KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION ) && \ + defined( KOKKOS_HAVE_PRAGMA_IVDEP ) + #pragma ivdep + #endif + for ( Member i = ibeg ; i < iend ; ++i ) { functor( i , update , final ); } } - template< class PType > - KOKKOS_FORCEINLINE_FUNCTION static - void driver( typename Impl::enable_if< - ( ! Impl::is_same< typename PType::work_tag , void >::value ) - , const FunctorType & >::type functor - , reference_type update - , const bool final - , const PType & range ) + template< class TagType > + inline static + typename std::enable_if< ! 
std::is_same< TagType , void >::value >::type + exec_range( const FunctorType & functor + , const Member & ibeg , const Member & iend + , reference_type update , const bool final ) { - const typename PType::member_type e = range.end(); - for ( typename PType::member_type i = range.begin() ; i < e ; ++i ) { - functor( typename PType::work_tag() , i , update , final ); + const TagType t{} ; + #if defined( KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION ) && \ + defined( KOKKOS_HAVE_PRAGMA_IVDEP ) + #pragma ivdep + #endif + for ( Member i = ibeg ; i < iend ; ++i ) { + functor( t , i , update , final ); } } - static void execute( ThreadsExec & exec , const void * arg ) + static void exec( ThreadsExec & exec , const void * arg ) { const ParallelScan & self = * ((const ParallelScan *) arg ); - const typename Policy::WorkRange range( self.m_policy , exec.pool_rank() , exec.pool_size() ); + const WorkRange range( self.m_policy, exec.pool_rank(), exec.pool_size() ); - reference_type update = ValueInit::init( self.m_func , exec.reduce_memory() ); + reference_type update = + ValueInit::init( self.m_functor , exec.reduce_memory() ); - driver( self.m_func , update , false , range ); + ParallelScan::template exec_range< WorkTag > + ( self.m_functor , range.begin(), range.end(), update, false ); - // exec.scan_large( self.m_func ); - exec.template scan_small( self.m_func ); + // exec.template scan_large( self.m_functor ); + exec.template scan_small( self.m_functor ); - driver( self.m_func , update , true , range ); + ParallelScan::template exec_range< WorkTag > + ( self.m_functor , range.begin(), range.end(), update, true ); exec.fan_in(); } public: - ParallelScan( const FunctorType & functor , const Policy & policy ) - : m_func( functor ) - , m_policy( policy ) + inline + void execute() const { - ThreadsExec::resize_scratch( 2 * ValueTraits::value_size( m_func ) , 0 ); - ThreadsExec::start( & ParallelScan::execute , this ); + ThreadsExec::resize_scratch( 2 * ValueTraits::value_size( 
m_functor ) , 0 ); + ThreadsExec::start( & ParallelScan::exec , this ); ThreadsExec::fence(); } + + ParallelScan( const FunctorType & arg_functor + , const Policy & arg_policy ) + : m_functor( arg_functor ) + , m_policy( arg_policy ) + { } }; } // namespace Impl diff --git a/lib/kokkos/core/src/impl/CMakeLists.txt b/lib/kokkos/core/src/impl/CMakeLists.txt new file mode 100644 index 0000000000..c543194de3 --- /dev/null +++ b/lib/kokkos/core/src/impl/CMakeLists.txt @@ -0,0 +1,18 @@ + +SET(HEADERS "") +SET(SOURCES "") + +FILE(GLOB HEADERS *.hpp) +FILE(GLOB SOURCES *.cpp) + +TRIBITS_ADD_LIBRARY( + kokkoscore_impl + NOINSTALLHEADERS ${HEADERS} + SOURCES ${SOURCES} + DEPLIBS + ) + +SET(TRILINOS_INCDIR ${CMAKE_INSTALL_PREFIX}/${${PROJECT_NAME}_INSTALL_INCLUDE_DIR}) + +INSTALL(FILES ${HEADERS} DESTINATION ${TRILINOS_INCDIR}/impl/) + diff --git a/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.cpp b/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.cpp index 50168fe3cc..e14929d163 100644 --- a/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.cpp +++ b/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.cpp @@ -47,6 +47,27 @@ namespace Kokkos { namespace Experimental { namespace Impl { +int SharedAllocationRecord< void , void >::s_tracking_enabled = 1 ; + +void SharedAllocationRecord< void , void >::tracking_claim_and_disable() +{ + // A host thread claim and disable tracking flag + + while ( ! Kokkos::atomic_compare_exchange_strong( & s_tracking_enabled, 1, 0 ) ); +} + +void SharedAllocationRecord< void , void >::tracking_release_and_enable() +{ + // The host thread that claimed and disabled the tracking flag + // now release and enable tracking. + + if ( ! 
Kokkos::atomic_compare_exchange_strong( & s_tracking_enabled, 0, 1 ) ){ + Kokkos::Impl::throw_runtime_exception("Kokkos::Experimental::Impl::SharedAllocationRecord<>::tracking_release_and_enable FAILED, this host process thread did not hold the lock" ); + } +} + +//---------------------------------------------------------------------------- + bool SharedAllocationRecord< void , void >:: is_sane( SharedAllocationRecord< void , void > * arg_record ) @@ -61,7 +82,7 @@ is_sane( SharedAllocationRecord< void , void > * arg_record ) SharedAllocationRecord * root_next = 0 ; // Lock the list: - while ( ( root_next = Kokkos::atomic_exchange( & root->m_next , zero ) ) == 0 ); + while ( ( root_next = Kokkos::atomic_exchange( & root->m_next , zero ) ) == zero ); for ( SharedAllocationRecord * rec = root_next ; ok && rec != root ; rec = rec->m_next ) { const bool ok_non_null = rec && rec->m_prev && ( rec == root || rec->m_next ); @@ -73,14 +94,25 @@ is_sane( SharedAllocationRecord< void , void > * arg_record ) ok = ok_root && ok_prev_next && ok_next_prev && ok_count ; if ( ! 
ok ) { - fprintf(stderr,"Kokkos::Experimental::Impl::SharedAllocationRecord failed is_sane: rec(0x%.12lx){ m_count(%d) m_root(0x%.12lx) m_next(0x%.12lx) m_prev(0x%.12lx) m_next->m_prev(0x%.12lx) m_prev->m_next(0x%.12lx) }\n" - , reinterpret_cast< unsigned long >( rec ) + //Formatting dependent on sizeof(uintptr_t) + const char * format_string; + + if (sizeof(uintptr_t) == sizeof(unsigned long)) { + format_string = "Kokkos::Experimental::Impl::SharedAllocationRecord failed is_sane: rec(0x%.12lx){ m_count(%d) m_root(0x%.12lx) m_next(0x%.12lx) m_prev(0x%.12lx) m_next->m_prev(0x%.12lx) m_prev->m_next(0x%.12lx) }\n"; + } + else if (sizeof(uintptr_t) == sizeof(unsigned long long)) { + format_string = "Kokkos::Experimental::Impl::SharedAllocationRecord failed is_sane: rec(0x%.12llx){ m_count(%d) m_root(0x%.12llx) m_next(0x%.12llx) m_prev(0x%.12llx) m_next->m_prev(0x%.12llx) m_prev->m_next(0x%.12llx) }\n"; + } + + fprintf(stderr + , format_string + , reinterpret_cast< uintptr_t >( rec ) , rec->m_count - , reinterpret_cast< unsigned long >( rec->m_root ) - , reinterpret_cast< unsigned long >( rec->m_next ) - , reinterpret_cast< unsigned long >( rec->m_prev ) - , reinterpret_cast< unsigned long >( rec->m_next->m_prev ) - , reinterpret_cast< unsigned long >( rec->m_prev != rec->m_root ? rec->m_prev->m_next : root_next ) + , reinterpret_cast< uintptr_t >( rec->m_root ) + , reinterpret_cast< uintptr_t >( rec->m_next ) + , reinterpret_cast< uintptr_t >( rec->m_prev ) + , reinterpret_cast< uintptr_t >( rec->m_next->m_prev ) + , reinterpret_cast< uintptr_t >( rec->m_prev != rec->m_root ? 
rec->m_prev->m_next : root_next ) ); } @@ -102,7 +134,7 @@ SharedAllocationRecord::find( SharedAllocationRecord * con SharedAllocationRecord * root_next = 0 ; // Lock the list: - while ( ( root_next = Kokkos::atomic_exchange( & arg_root->m_next , 0 ) ) == 0 ); + while ( ( root_next = Kokkos::atomic_exchange( & arg_root->m_next , zero ) ) == zero ); // Iterate searching for the record with this data pointer @@ -148,7 +180,7 @@ SharedAllocationRecord( SharedAllocationRecord * arg_root m_prev = m_root ; // Read root->m_next and lock by setting to zero - while ( ( m_next = Kokkos::atomic_exchange( & m_root->m_next , zero ) ) == 0 ); + while ( ( m_next = Kokkos::atomic_exchange( & m_root->m_next , zero ) ) == zero ); m_next->m_prev = this ; @@ -187,7 +219,7 @@ decrement( SharedAllocationRecord< void , void > * arg_record ) SharedAllocationRecord * root_next = 0 ; // Lock the list: - while ( ( root_next = Kokkos::atomic_exchange( & arg_record->m_root->m_next , 0 ) ) == 0 ); + while ( ( root_next = Kokkos::atomic_exchange( & arg_record->m_root->m_next , zero ) ) == zero ); arg_record->m_next->m_prev = arg_record->m_prev ; @@ -232,16 +264,26 @@ print_host_accessible_records( std::ostream & s if ( detail ) { do { + //Formatting dependent on sizeof(uintptr_t) + const char * format_string; - snprintf( buffer , 256 , "%s addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx + %.8ld ] count(%d) dealloc(0x%.12lx) %s\n" + if (sizeof(uintptr_t) == sizeof(unsigned long)) { + format_string = "%s addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx + %.8ld ] count(%d) dealloc(0x%.12lx) %s\n"; + } + else if (sizeof(uintptr_t) == sizeof(unsigned long long)) { + format_string = "%s addr( 0x%.12llx ) list( 0x%.12llx 0x%.12llx ) extent[ 0x%.12llx + %.8ld ] count(%d) dealloc(0x%.12llx) %s\n"; + } + + snprintf( buffer , 256 + , format_string , space_name - , reinterpret_cast( r ) - , reinterpret_cast( r->m_prev ) - , reinterpret_cast( r->m_next ) - , reinterpret_cast( 
r->m_alloc_ptr ) + , reinterpret_cast( r ) + , reinterpret_cast( r->m_prev ) + , reinterpret_cast( r->m_next ) + , reinterpret_cast( r->m_alloc_ptr ) , r->m_alloc_size , r->m_count - , reinterpret_cast( r->m_dealloc ) + , reinterpret_cast( r->m_dealloc ) , r->m_alloc_ptr->m_label ); std::cout << buffer ; @@ -251,10 +293,20 @@ print_host_accessible_records( std::ostream & s else { do { if ( r->m_alloc_ptr ) { + //Formatting dependent on sizeof(uintptr_t) + const char * format_string; - snprintf( buffer , 256 , "%s [ 0x%.12lx + %ld ] %s\n" + if (sizeof(uintptr_t) == sizeof(unsigned long)) { + format_string = "%s [ 0x%.12lx + %ld ] %s\n"; + } + else if (sizeof(uintptr_t) == sizeof(unsigned long long)) { + format_string = "%s [ 0x%.12llx + %ld ] %s\n"; + } + + snprintf( buffer , 256 + , format_string , space_name - , reinterpret_cast< unsigned long >( r->data() ) + , reinterpret_cast< uintptr_t >( r->data() ) , r->size() , r->m_alloc_ptr->m_label ); diff --git a/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.hpp b/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.hpp index c8c553731a..f6fbe0b374 100644 --- a/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.hpp +++ b/lib/kokkos/core/src/impl/KokkosExp_SharedAlloc.hpp @@ -41,6 +41,9 @@ //@HEADER */ +#ifndef KOKKOS_SHARED_ALLOC_HPP_ +#define KOKKOS_SHARED_ALLOC_HPP_ + namespace Kokkos { namespace Experimental { namespace Impl { @@ -78,6 +81,8 @@ protected: typedef void (* function_type )( SharedAllocationRecord * ); + static int s_tracking_enabled ; + SharedAllocationHeader * const m_alloc_ptr ; size_t const m_alloc_size ; function_type const m_dealloc ; @@ -100,6 +105,18 @@ protected: public: + static int tracking_enabled() { return s_tracking_enabled ; } + + /**\brief A host process thread claims and disables the + * shared allocation tracking flag. + */ + static void tracking_claim_and_disable(); + + /**\brief A host process thread releases and enables the + * shared allocation tracking flag. 
+ */ + static void tracking_release_and_enable(); + ~SharedAllocationRecord() = default ; constexpr SharedAllocationRecord() @@ -148,6 +165,25 @@ public: , const bool detail ); }; +namespace { + +/* Taking the address of this function so make sure it is unique */ +template < class MemorySpace , class DestroyFunctor > +void deallocate( SharedAllocationRecord * record_ptr ) +{ + typedef SharedAllocationRecord< MemorySpace , void > base_type ; + typedef SharedAllocationRecord< MemorySpace , DestroyFunctor > this_type ; + + this_type * const ptr = static_cast< this_type * >( + static_cast< base_type * >( record_ptr ) ); + + ptr->m_destroy.destroy_shared_allocation(); + + delete ptr ; +} + +} + /* * Memory space specialization of SharedAllocationRecord< Space , void > requires : * @@ -158,25 +194,23 @@ public: * Space m_space ; * } */ - template< class MemorySpace , class DestroyFunctor > class SharedAllocationRecord : public SharedAllocationRecord< MemorySpace , void > { private: - static void deallocate( SharedAllocationRecord * record_ptr ) - { delete static_cast*>(record_ptr); } - SharedAllocationRecord( const MemorySpace & arg_space , const std::string & arg_label , const size_t arg_alloc ) /* Allocate user memory as [ SharedAllocationHeader , user_memory ] */ - : SharedAllocationRecord< MemorySpace , void >( arg_space , arg_label , arg_alloc , & deallocate ) + : SharedAllocationRecord< MemorySpace , void >( arg_space , arg_label , arg_alloc , & Kokkos::Experimental::Impl::deallocate< MemorySpace , DestroyFunctor > ) , m_destroy() {} - ~SharedAllocationRecord() { m_destroy.destroy_shared_allocation(); } + SharedAllocationRecord() = delete ; + SharedAllocationRecord( const SharedAllocationRecord & ) = delete ; + SharedAllocationRecord & operator = ( const SharedAllocationRecord & ) = delete ; public: @@ -204,42 +238,48 @@ private: typedef SharedAllocationRecord Record ; - enum : unsigned long { - DO_NOT_DEREF_FLAG = 0x01ul - }; + enum : uintptr_t { DO_NOT_DEREF_FLAG 
= 0x01ul }; // The allocation record resides in Host memory space - Record * m_record ; - unsigned long m_record_bits; - - KOKKOS_INLINE_FUNCTION - static Record * disable( Record * rec ) - { return reinterpret_cast( reinterpret_cast( rec ) & DO_NOT_DEREF_FLAG ); } - - KOKKOS_INLINE_FUNCTION - void increment() const - { -#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) - if ( ! ( m_record_bits & DO_NOT_DEREF_FLAG ) ) Record::increment( m_record ); -#endif - } - - KOKKOS_INLINE_FUNCTION - void decrement() const - { -#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) - if ( ! ( m_record_bits & DO_NOT_DEREF_FLAG ) ) Record::decrement( m_record ); -#endif - } + Record * m_record ; + uintptr_t m_record_bits ; public: - KOKKOS_INLINE_FUNCTION - constexpr SharedAllocationTracker() : m_record_bits( DO_NOT_DEREF_FLAG ) {} + // Use macros instead of inline functions to reduce + // pressure on compiler optimization by reducing + // number of symbols and inline functons. + +#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) + +#define KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED \ + Record::tracking_enabled() + +#define KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT \ + if ( ! ( m_record_bits & DO_NOT_DEREF_FLAG ) ) Record::increment( m_record ); + +#define KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT \ + if ( ! 
( m_record_bits & DO_NOT_DEREF_FLAG ) ) Record::decrement( m_record ); + +#else + +#define KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED 0 + +#define KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT /* */ + +#define KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT /* */ + +#endif + + /** \brief Assign a specialized record */ + inline + void assign_allocated_record_to_uninitialized( Record * arg_record ) + { Record::increment( m_record = arg_record ); } template< class MemorySpace > constexpr - SharedAllocationRecord< MemorySpace , void > & get_record() const + SharedAllocationRecord< MemorySpace , void > & + get_record() const { return * static_cast< SharedAllocationRecord< MemorySpace , void > * >( m_record ); } template< class MemorySpace > @@ -252,36 +292,92 @@ public: } KOKKOS_INLINE_FUNCTION - SharedAllocationTracker( Record * arg_record ) - : m_record( arg_record ) { increment(); } - - KOKKOS_INLINE_FUNCTION - ~SharedAllocationTracker() { decrement(); } - - KOKKOS_INLINE_FUNCTION - SharedAllocationTracker( const SharedAllocationTracker & rhs ) - : m_record( rhs.m_record ) { increment(); } - - KOKKOS_INLINE_FUNCTION - SharedAllocationTracker( SharedAllocationTracker && rhs ) - : m_record( rhs.m_record ) { rhs.m_record_bits = DO_NOT_DEREF_FLAG ; } - - KOKKOS_INLINE_FUNCTION - SharedAllocationTracker & operator = ( const SharedAllocationTracker & rhs ) + int use_count() const { - decrement(); - m_record = rhs.m_record ; - increment(); - return *this ; +#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) + Record * const tmp = reinterpret_cast( m_record_bits & ~DO_NOT_DEREF_FLAG ); + return ( tmp ? 
tmp->use_count() : 0 ); +#else + return 0 ; +#endif } - KOKKOS_INLINE_FUNCTION + KOKKOS_FORCEINLINE_FUNCTION + ~SharedAllocationTracker() + { KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT } + + KOKKOS_FORCEINLINE_FUNCTION + constexpr SharedAllocationTracker() + : m_record_bits( DO_NOT_DEREF_FLAG ) {} + + // Move: + + KOKKOS_FORCEINLINE_FUNCTION + SharedAllocationTracker( SharedAllocationTracker && rhs ) + : m_record_bits( rhs.m_record_bits ) + { rhs.m_record_bits = DO_NOT_DEREF_FLAG ; } + + KOKKOS_FORCEINLINE_FUNCTION SharedAllocationTracker & operator = ( SharedAllocationTracker && rhs ) { - m_record = rhs.m_record ; + // If this is tracking then must decrement + KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT + // Move and reset RHS to default constructed value. + m_record_bits = rhs.m_record_bits ; rhs.m_record_bits = DO_NOT_DEREF_FLAG ; return *this ; } + + // Copy: + + KOKKOS_FORCEINLINE_FUNCTION + SharedAllocationTracker( const SharedAllocationTracker & rhs ) + : m_record_bits( KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED + ? rhs.m_record_bits + : rhs.m_record_bits | DO_NOT_DEREF_FLAG ) + { + KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT + } + + /** \brief Copy construction may disable tracking. */ + KOKKOS_FORCEINLINE_FUNCTION + SharedAllocationTracker( const SharedAllocationTracker & rhs + , const bool enable_tracking ) + : m_record_bits( KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED + && enable_tracking + ? rhs.m_record_bits + : rhs.m_record_bits | DO_NOT_DEREF_FLAG ) + { KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT } + + KOKKOS_FORCEINLINE_FUNCTION + SharedAllocationTracker & operator = ( const SharedAllocationTracker & rhs ) + { + // If this is tracking then must decrement + KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT + m_record_bits = KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED + ? 
rhs.m_record_bits + : rhs.m_record_bits | DO_NOT_DEREF_FLAG ; + KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT + return *this ; + } + + /** \brief Copy assignment may disable tracking */ + KOKKOS_FORCEINLINE_FUNCTION + void assign( const SharedAllocationTracker & rhs + , const bool enable_tracking ) + { + KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT + m_record_bits = KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED + && enable_tracking + ? rhs.m_record_bits + : rhs.m_record_bits | DO_NOT_DEREF_FLAG ; + KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT + } + +#undef KOKKOS_SHARED_ALLOCATION_TRACKER_ENABLED +#undef KOKKOS_SHARED_ALLOCATION_TRACKER_INCREMENT +#undef KOKKOS_SHARED_ALLOCATION_TRACKER_DECREMENT + }; @@ -289,4 +385,4 @@ public: } /* namespace Experimental */ } /* namespace Kokkos */ - +#endif diff --git a/lib/kokkos/core/src/impl/KokkosExp_ViewAllocProp.hpp b/lib/kokkos/core/src/impl/KokkosExp_ViewAllocProp.hpp index 348ccaf5ed..d571a1ea0c 100644 --- a/lib/kokkos/core/src/impl/KokkosExp_ViewAllocProp.hpp +++ b/lib/kokkos/core/src/impl/KokkosExp_ViewAllocProp.hpp @@ -47,6 +47,28 @@ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + +namespace Kokkos { + +/* For backward compatibility */ + +struct ViewAllocateWithoutInitializing { + + const std::string label ; + + ViewAllocateWithoutInitializing() : label() {} + ViewAllocateWithoutInitializing( const std::string & arg_label ) : label( arg_label ) {} + ViewAllocateWithoutInitializing( const char * const arg_label ) : label( arg_label ) {} +}; + +} /* namespace Kokkos */ + +#endif + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + namespace Kokkos { namespace Experimental { namespace Impl { diff --git a/lib/kokkos/core/src/impl/KokkosExp_ViewArray.hpp 
b/lib/kokkos/core/src/impl/KokkosExp_ViewArray.hpp index 6f49c57b3c..432d29ab32 100644 --- a/lib/kokkos/core/src/impl/KokkosExp_ViewArray.hpp +++ b/lib/kokkos/core/src/impl/KokkosExp_ViewArray.hpp @@ -50,8 +50,8 @@ namespace Kokkos { namespace Experimental { namespace Impl { -template< class DataType , class V , long N , class P , class ArrayLayout > -struct ViewDataAnalysis< DataType , Kokkos::Array , ArrayLayout > +template< class DataType , class ArrayLayout , class V , size_t N , class P > +struct ViewDataAnalysis< DataType , ArrayLayout , Kokkos::Array > { private: @@ -73,15 +73,7 @@ private: , typename array_analysis::const_value_type >::value }; - typedef ViewDimension< ( dimension::rank == 0 ? N : dimension::arg_N0 ) - , ( dimension::rank == 1 ? N : dimension::arg_N1 ) - , ( dimension::rank == 2 ? N : dimension::arg_N2 ) - , ( dimension::rank == 3 ? N : dimension::arg_N3 ) - , ( dimension::rank == 4 ? N : dimension::arg_N4 ) - , ( dimension::rank == 5 ? N : dimension::arg_N5 ) - , ( dimension::rank == 6 ? N : dimension::arg_N6 ) - , ( dimension::rank == 7 ? 
N : dimension::arg_N7 ) - > array_scalar_dimension ; + typedef typename dimension::template append::type array_scalar_dimension ; typedef typename std::conditional< is_const , const V , V >::type scalar_type ; typedef V non_const_scalar_type ; @@ -113,18 +105,18 @@ namespace Impl { /** \brief View mapping for non-specialized data type and standard layout */ template< class Traits > -class ViewMapping< Traits , void , - typename std::enable_if<( std::is_same< typename Traits::specialize , Kokkos::Array<> >::value && - ( std::is_same< typename Traits::array_layout , Kokkos::LayoutLeft >::value || - std::is_same< typename Traits::array_layout , Kokkos::LayoutRight >::value || - std::is_same< typename Traits::array_layout , Kokkos::LayoutStride >::value ) - )>::type > +class ViewMapping< Traits , + typename std::enable_if<( + std::is_same< typename Traits::specialize , Kokkos::Array<> >::value && + ( std::is_same< typename Traits::array_layout , Kokkos::LayoutLeft >::value || + std::is_same< typename Traits::array_layout , Kokkos::LayoutRight >::value || + std::is_same< typename Traits::array_layout , Kokkos::LayoutStride >::value ) + )>::type > { private: - template< class , class , typename > friend class ViewMapping ; - template< class , bool , bool , bool , bool , bool , bool , bool , bool , class > friend struct SubviewMapping ; - template< class , class , class , class > friend class Kokkos::Experimental::View ; + template< class , class ... > friend class ViewMapping ; + template< class , class ... 
> friend class Kokkos::Experimental::View ; typedef ViewOffset< typename Traits::dimension , typename Traits::array_layout @@ -187,16 +179,20 @@ public: // Range span /** \brief Span of the mapped range */ - KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_offset.span(); } + KOKKOS_INLINE_FUNCTION constexpr size_t span() const + { return m_offset.span() * Array_N ; } /** \brief Is the mapped range span contiguous */ - KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const { return m_offset.span_is_contiguous(); } + KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const + { return m_offset.span_is_contiguous(); } typedef typename std::conditional< is_contiguous_reference , contiguous_reference , strided_reference >::type reference_type ; + typedef handle_type pointer_type ; + /** \brief If data references are lvalue_reference than can query pointer to memory */ - KOKKOS_INLINE_FUNCTION constexpr typename Traits::value_type * data() const - { return (typename Traits::value_type *) 0 ; } + KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const + { return m_handle ; } //---------------------------------------- // The View class performs all rank and bounds checking before @@ -259,14 +255,14 @@ public: private: enum { MemorySpanMask = 8 - 1 /* Force alignment on 8 byte boundary */ }; - enum { MemorySpanSize = sizeof(typename Traits::value_type) }; + enum { MemorySpanSize = sizeof(scalar_type) }; public: /** \brief Span, in bytes, of the referenced memory */ KOKKOS_INLINE_FUNCTION constexpr size_t memory_span() const { - return ( m_stride * sizeof(typename Traits::value_type) + MemorySpanMask ) & ~size_t(MemorySpanMask); + return ( m_offset.span() * Array_N * MemorySpanSize + MemorySpanMask ) & ~size_t(MemorySpanMask); } /** \brief Span, in bytes, of the required memory */ @@ -277,7 +273,7 @@ public: , const size_t N4 , const size_t N5 , const size_t N6 , const size_t N7 ) { typedef std::integral_constant< unsigned , AllowPadding ? 
MemorySpanSize : 0 > padding ; - return ( offset_type( padding(), N0, N1, N2, N3, N4, N5, N6, N7 ).span() * MemorySpanSize + MemorySpanMask ) & ~size_t(MemorySpanMask); + return ( offset_type( padding(), N0, N1, N2, N3, N4, N5, N6, N7 ).span() * Array_N * MemorySpanSize + MemorySpanMask ) & ~size_t(MemorySpanMask); } /** \brief Span, in bytes, of the required memory */ @@ -286,7 +282,7 @@ public: static constexpr size_t memory_span( const std::integral_constant & , const typename Traits::array_layout & layout ) { - return ( offset_type( layout ).span() * MemorySpanSize + MemorySpanMask ) & ~size_t(MemorySpanMask); + return ( offset_type( layout ).span() * Array_N * MemorySpanSize + MemorySpanMask ) & ~size_t(MemorySpanMask); } //---------------------------------------- @@ -305,11 +301,11 @@ public: template< bool AllowPadding > KOKKOS_INLINE_FUNCTION - ViewMapping( void * ptr + ViewMapping( pointer_type ptr , const std::integral_constant & , const size_t N0 , const size_t N1 , const size_t N2 , const size_t N3 , const size_t N4 , const size_t N5 , const size_t N6 , const size_t N7 ) - : m_handle( reinterpret_cast< handle_type >( ptr ) ) + : m_handle( ptr ) , m_offset( std::integral_constant< unsigned , AllowPadding ? 
sizeof(typename Traits::value_type) : 0 >() , N0, N1, N2, N3, N4, N5, N6, N7 ) , m_stride( m_offset.span() ) @@ -317,10 +313,10 @@ public: template< bool AllowPadding > KOKKOS_INLINE_FUNCTION - ViewMapping( void * ptr + ViewMapping( pointer_type ptr , const std::integral_constant & , const typename Traits::array_layout & layout ) - : m_handle( reinterpret_cast< handle_type >( ptr ) ) + : m_handle( ptr ) , m_offset( layout ) , m_stride( m_offset.span() ) {} @@ -340,7 +336,8 @@ public: { typedef Kokkos::RangePolicy< ExecSpace , size_t > Policy ; - (void) Kokkos::Impl::ParallelFor< ViewMapping , Policy >( *this , Policy( 0 , m_stride ) ); + const Kokkos::Impl::ParallelFor< ViewMapping , Policy > closure( *this , Policy( 0 , m_stride ) ); + closure.execute(); ExecSpace::fence(); } @@ -379,8 +376,8 @@ public: enum { is_assignable = true }; typedef Kokkos::Experimental::Impl::SharedAllocationTracker TrackType ; - typedef ViewMapping< DstTraits , void , void > DstType ; - typedef ViewMapping< SrcTraits , void , void > SrcType ; + typedef ViewMapping< DstTraits , void > DstType ; + typedef ViewMapping< SrcTraits , void > SrcType ; KOKKOS_INLINE_FUNCTION static void assign( DstType & dst , const SrcType & src , const TrackType & src_track ) @@ -438,8 +435,8 @@ public: std::is_same< typename DstTraits::array_layout , typename SrcTraits::array_layout >::value }; typedef Kokkos::Experimental::Impl::SharedAllocationTracker TrackType ; - typedef ViewMapping< DstTraits , void , void > DstType ; - typedef ViewMapping< SrcTraits , void , void > SrcType ; + typedef ViewMapping< DstTraits , void > DstType ; + typedef ViewMapping< SrcTraits , void > SrcType ; KOKKOS_INLINE_FUNCTION static void assign( DstType & dst , const SrcType & src , const TrackType & src_track ) @@ -452,6 +449,7 @@ public: // Arguments beyond the destination rank are ignored. if ( src.span_is_contiguous() ) { // not padded dst.m_offset = dst_offset_type( std::integral_constant() + , ( 0 < SrcType::Rank ? 
src.dimension_0() : SrcTraits::value_type::size() ) , ( 1 < SrcType::Rank ? src.dimension_1() : SrcTraits::value_type::size() ) , ( 2 < SrcType::Rank ? src.dimension_2() : SrcTraits::value_type::size() ) , ( 3 < SrcType::Rank ? src.dimension_3() : SrcTraits::value_type::size() ) @@ -483,34 +481,47 @@ public: //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- -/** \brief View mapping for non-specialized data type and standard layout */ -template< class Traits , bool R0 , bool R1 , bool R2 , bool R3 , bool R4 , bool R5 , bool R6 , bool R7 > -struct SubviewMapping< Traits, R0, R1, R2, R3, R4, R5, R6, R7 , - typename std::enable_if<( - std::is_same< typename Traits::specialize , Kokkos::Array<> >::value - && - ( - std::is_same< typename Traits::array_layout , Kokkos::LayoutLeft >::value || - std::is_same< typename Traits::array_layout , Kokkos::LayoutRight >::value || - std::is_same< typename Traits::array_layout , Kokkos::LayoutStride >::value - ) - )>::type > +template< class SrcTraits , class ... Args > +struct ViewMapping + < typename std::enable_if<( + std::is_same< typename SrcTraits::specialize , Kokkos::Array<> >::value + && + ( + std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutLeft >::value || + std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutRight >::value || + std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutStride >::value + ) + )>::type + , SrcTraits + , Args ... 
> { private: - // Subview's rank + static_assert( SrcTraits::rank == sizeof...(Args) , "" ); + + enum : bool + { R0 = is_integral_extent<0,Args...>::value + , R1 = is_integral_extent<1,Args...>::value + , R2 = is_integral_extent<2,Args...>::value + , R3 = is_integral_extent<3,Args...>::value + , R4 = is_integral_extent<4,Args...>::value + , R5 = is_integral_extent<5,Args...>::value + , R6 = is_integral_extent<6,Args...>::value + , R7 = is_integral_extent<7,Args...>::value + }; + enum { rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) + unsigned(R4) + unsigned(R5) + unsigned(R6) + unsigned(R7) }; // Whether right-most rank is a range. - enum { R0_rev = 0 == Traits::rank ? false : ( - 1 == Traits::rank ? R0 : ( - 2 == Traits::rank ? R1 : ( - 3 == Traits::rank ? R2 : ( - 4 == Traits::rank ? R3 : ( - 5 == Traits::rank ? R4 : ( - 6 == Traits::rank ? R5 : ( - 7 == Traits::rank ? R6 : R7 ))))))) }; + enum { R0_rev = 0 == SrcTraits::rank ? false : ( + 1 == SrcTraits::rank ? R0 : ( + 2 == SrcTraits::rank ? R1 : ( + 3 == SrcTraits::rank ? R2 : ( + 4 == SrcTraits::rank ? R3 : ( + 5 == SrcTraits::rank ? R4 : ( + 6 == SrcTraits::rank ? R5 : ( + 7 == SrcTraits::rank ? R6 : R7 ))))))) }; // Subview's layout typedef typename std::conditional< @@ -519,15 +530,15 @@ private: || // OutputRank 1 or 2, InputLayout Left, Interval 0 // because single stride one or second index has a stride. - ( rank <= 2 && R0 && std::is_same< typename Traits::array_layout , Kokkos::LayoutLeft >::value ) + ( rank <= 2 && R0 && std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutLeft >::value ) || // OutputRank 1 or 2, InputLayout Right, Interval [InputRank-1] // because single stride one or second index has a stride. 
- ( rank <= 2 && R0_rev && std::is_same< typename Traits::array_layout , Kokkos::LayoutRight >::value ) - ), typename Traits::array_layout , Kokkos::LayoutStride + ( rank <= 2 && R0_rev && std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutRight >::value ) + ), typename SrcTraits::array_layout , Kokkos::LayoutStride >::type array_layout ; - typedef typename Traits::value_type value_type ; + typedef typename SrcTraits::value_type value_type ; typedef typename std::conditional< rank == 0 , value_type , typename std::conditional< rank == 1 , value_type * , @@ -543,66 +554,41 @@ private: public: - typedef - Kokkos::Experimental::ViewTraits< data_type , array_layout - , typename Traits::device_type - , typename Traits::memory_traits > traits_type ; + typedef Kokkos::Experimental::ViewTraits + < data_type + , array_layout + , typename SrcTraits::device_type + , typename SrcTraits::memory_traits > traits_type ; - typedef Kokkos::Experimental::View< data_type - , array_layout - , typename Traits::device_type - , typename Traits::memory_traits > type ; + typedef Kokkos::Experimental::View + < data_type + , array_layout + , typename SrcTraits::device_type + , typename SrcTraits::memory_traits > type ; - template< class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 , class T6 , class T7 > KOKKOS_INLINE_FUNCTION - static void assign( ViewMapping< traits_type , void , void > & dst - , ViewMapping< Traits , void , void > const & src - , T0 const & arg0 - , T1 const & arg1 - , T2 const & arg2 - , T3 const & arg3 - , T4 const & arg4 - , T5 const & arg5 - , T6 const & arg6 - , T7 const & arg7 - ) + static void assign( ViewMapping< traits_type , void > & dst + , ViewMapping< SrcTraits , void > const & src + , Args ... 
args ) { - typedef ViewMapping< traits_type , void , void > DstType ; + typedef ViewMapping< traits_type , void > DstType ; typedef typename DstType::offset_type dst_offset_type ; typedef typename DstType::handle_type dst_handle_type ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V0 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V1 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V2 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V3 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V4 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V5 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V6 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V7 ; - - dst.m_offset = dst_offset_type - ( src.m_offset - , V0::dimension( src.m_offset.dimension_0() , arg0 ) - , V1::dimension( src.m_offset.dimension_1() , arg1 ) - , V2::dimension( src.m_offset.dimension_2() , arg2 ) - , V3::dimension( src.m_offset.dimension_3() , arg3 ) - , V4::dimension( src.m_offset.dimension_4() , arg4 ) - , V5::dimension( src.m_offset.dimension_5() , arg5 ) - , V6::dimension( src.m_offset.dimension_6() , arg6 ) - , V7::dimension( src.m_offset.dimension_7() , arg7 ) - ); + const SubviewExtents< SrcTraits::rank , rank > + extents( src.m_offset.m_dim , args... 
); + dst.m_offset = dst_offset_type( src.m_offset , extents ); dst.m_handle = dst_handle_type( src.m_handle + - src.m_offset( V0::begin( arg0 ) - , V1::begin( arg1 ) - , V2::begin( arg2 ) - , V3::begin( arg3 ) - , V4::begin( arg4 ) - , V5::begin( arg5 ) - , V6::begin( arg6 ) - , V7::begin( arg7 ) + src.m_offset( extents.domain_offset(0) + , extents.domain_offset(1) + , extents.domain_offset(2) + , extents.domain_offset(3) + , extents.domain_offset(4) + , extents.domain_offset(5) + , extents.domain_offset(6) + , extents.domain_offset(7) ) ); } }; diff --git a/lib/kokkos/core/src/impl/KokkosExp_ViewMapping.hpp b/lib/kokkos/core/src/impl/KokkosExp_ViewMapping.hpp index 5fa1bb7155..5ec003222f 100644 --- a/lib/kokkos/core/src/impl/KokkosExp_ViewMapping.hpp +++ b/lib/kokkos/core/src/impl/KokkosExp_ViewMapping.hpp @@ -49,6 +49,7 @@ #include #include +#include #include #include @@ -69,372 +70,162 @@ namespace Kokkos { namespace Experimental { namespace Impl { -template< long sN0 = -1 - , long sN1 = -1 - , long sN2 = -1 - , long sN3 = -1 - , long sN4 = -1 - , long sN5 = -1 - , long sN6 = -1 - , long sN7 = -1 - > -struct ViewDimension { +template< unsigned I , size_t ... Args > +struct variadic_size_t + { enum { value = ~size_t(0) }; }; - enum { arg_N0 = sN0 }; - enum { arg_N1 = sN1 }; - enum { arg_N2 = sN2 }; - enum { arg_N3 = sN3 }; - enum { arg_N4 = sN4 }; - enum { arg_N5 = sN5 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; +template< size_t Val , size_t ... Args > +struct variadic_size_t< 0 , Val , Args ... > + { enum { value = Val }; }; - enum { rank = ( sN0 < 0 ? 0 : - ( sN1 < 0 ? 1 : - ( sN2 < 0 ? 2 : - ( sN3 < 0 ? 3 : - ( sN4 < 0 ? 4 : - ( sN5 < 0 ? 5 : - ( sN6 < 0 ? 6 : - ( sN7 < 0 ? 7 : 8 )))))))) }; - enum { rank_dynamic = 0 }; +template< unsigned I , size_t Val , size_t ... Args > +struct variadic_size_t< I , Val , Args ... > + { enum { value = variadic_size_t< I - 1 , Args ... >::value }; }; - enum { N0 = 0 < sN0 ? sN0 : 1 }; - enum { N1 = 0 < sN1 ? 
sN1 : 1 }; - enum { N2 = 0 < sN2 ? sN2 : 1 }; - enum { N3 = 0 < sN3 ? sN3 : 1 }; - enum { N4 = 0 < sN4 ? sN4 : 1 }; - enum { N5 = 0 < sN5 ? sN5 : 1 }; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t , unsigned , unsigned , unsigned - , unsigned , unsigned , unsigned , unsigned ) {} -}; - -template< long sN1 - , long sN2 - , long sN3 - , long sN4 - , long sN5 - , long sN6 - , long sN7 - > -struct ViewDimension< 0, sN1, sN2, sN3, sN4, sN5, sN6, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = sN1 }; - enum { arg_N2 = sN2 }; - enum { arg_N3 = sN3 }; - enum { arg_N4 = sN4 }; - enum { arg_N5 = sN5 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN1 < 0 ? 1 : - ( sN2 < 0 ? 2 : - ( sN3 < 0 ? 3 : - ( sN4 < 0 ? 4 : - ( sN5 < 0 ? 5 : - ( sN6 < 0 ? 6 : - ( sN7 < 0 ? 7 : 8 ))))))) }; - enum { rank_dynamic = 1 }; - - size_t N0 ; /* When 1 == rank_dynamic allow N0 >= 2^32 */ - enum { N1 = 0 < sN1 ? sN1 : 1 }; - enum { N2 = 0 < sN2 ? sN2 : 1 }; - enum { N3 = 0 < sN3 ? sN3 : 1 }; - enum { N4 = 0 < sN4 ? sN4 : 1 }; - enum { N5 = 0 < sN5 ? sN5 : 1 }; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? 
sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned , unsigned , unsigned - , unsigned , unsigned , unsigned , unsigned ) - : N0( aN0 ) {} -}; - -template< long sN2 - , long sN3 - , long sN4 - , long sN5 - , long sN6 - , long sN7 - > -struct ViewDimension< 0, 0, sN2, sN3, sN4, sN5, sN6, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = sN2 }; - enum { arg_N3 = sN3 }; - enum { arg_N4 = sN4 }; - enum { arg_N5 = sN5 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN2 < 0 ? 2 : - ( sN3 < 0 ? 3 : - ( sN4 < 0 ? 4 : - ( sN5 < 0 ? 5 : - ( sN6 < 0 ? 6 : - ( sN7 < 0 ? 7 : 8 )))))) }; - enum { rank_dynamic = 2 }; - - size_t N0 ; /* When 2 == rank_dynamic allow N0 >= 2^32 */ - size_t N1 ; /* When 2 == rank_dynamic allow N1 >= 2^32 */ - enum { N2 = 0 < sN2 ? sN2 : 1 }; - enum { N3 = 0 < sN3 ? sN3 : 1 }; - enum { N4 = 0 < sN4 ? sN4 : 1 }; - enum { N5 = 0 < sN5 ? sN5 : 1 }; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned , unsigned - , unsigned , unsigned , unsigned , unsigned ) - : N0( aN0 ) , N1( aN1 ) {} -}; - -template< long sN3 - , long sN4 - , long sN5 - , long sN6 - , long sN7 - > -struct ViewDimension< 0, 0, 0, sN3, sN4, sN5, sN6, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = 0 }; - enum { arg_N3 = sN3 }; - enum { arg_N4 = sN4 }; - enum { arg_N5 = sN5 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN3 < 0 ? 3 : - ( sN4 < 0 ? 4 : - ( sN5 < 0 ? 5 : - ( sN6 < 0 ? 6 : - ( sN7 < 0 ? 
7 : 8 ))))) }; - enum { rank_dynamic = 3 }; - - unsigned N0 ; - unsigned N1 ; - unsigned N2 ; - enum { N3 = 0 < sN3 ? sN3 : 1 }; - enum { N4 = 0 < sN4 ? sN4 : 1 }; - enum { N5 = 0 < sN5 ? sN5 : 1 }; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned aN2 , unsigned - , unsigned , unsigned , unsigned , unsigned ) - : N0( aN0 ) , N1( aN1 ) , N2( aN2 ) {} -}; - -template< long sN4 - , long sN5 - , long sN6 - , long sN7 - > -struct ViewDimension< 0, 0, 0, 0, sN4, sN5, sN6, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = 0 }; - enum { arg_N3 = 0 }; - enum { arg_N4 = sN4 }; - enum { arg_N5 = sN5 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN4 < 0 ? 4 : - ( sN5 < 0 ? 5 : - ( sN6 < 0 ? 6 : - ( sN7 < 0 ? 7 : 8 )))) }; - enum { rank_dynamic = 4 }; - - unsigned N0 ; - unsigned N1 ; - unsigned N2 ; - unsigned N3 ; - enum { N4 = 0 < sN4 ? sN4 : 1 }; - enum { N5 = 0 < sN5 ? sN5 : 1 }; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned aN2 , unsigned aN3 - , unsigned , unsigned , unsigned , unsigned ) - : N0( aN0 ) , N1( aN1 ) , N2( aN2 ) , N3( aN3 ) {} -}; - -template< long sN5 - , long sN6 - , long sN7 - > -struct ViewDimension< 0, 0, 0, 0, 0, sN5, sN6, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = 0 }; - enum { arg_N3 = 0 }; - enum { arg_N4 = 0 }; - enum { arg_N5 = sN5 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN5 < 0 ? 5 : - ( sN6 < 0 ? 
6 : - ( sN7 < 0 ? 7 : 8 ))) }; - enum { rank_dynamic = 5 }; - - unsigned N0 ; - unsigned N1 ; - unsigned N2 ; - unsigned N3 ; - unsigned N4 ; - enum { N5 = 0 < sN5 ? sN5 : 1 }; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned aN2 , unsigned aN3 - , unsigned aN4 , unsigned , unsigned , unsigned ) - : N0( aN0 ) , N1( aN1 ) , N2( aN2 ) , N3( aN3 ) , N4( aN4 ) {} -}; - -template< long sN6 - , long sN7 - > -struct ViewDimension< 0, 0, 0, 0, 0, 0, sN6, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = 0 }; - enum { arg_N3 = 0 }; - enum { arg_N4 = 0 }; - enum { arg_N5 = 0 }; - enum { arg_N6 = sN6 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN6 < 0 ? 6 : - ( sN7 < 0 ? 7 : 8 )) }; - enum { rank_dynamic = 6 }; - - unsigned N0 ; - unsigned N1 ; - unsigned N2 ; - unsigned N3 ; - unsigned N4 ; - unsigned N5 ; - enum { N6 = 0 < sN6 ? sN6 : 1 }; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned aN2 , unsigned aN3 - , unsigned aN4 , unsigned aN5 , unsigned , unsigned ) - : N0( aN0 ) , N1( aN1 ) , N2( aN2 ) , N3( aN3 ) , N4( aN4 ) , N5( aN5 ) {} -}; - -template< long sN7 > -struct ViewDimension< 0, 0, 0, 0, 0, 0, 0, sN7 > { - - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = 0 }; - enum { arg_N3 = 0 }; - enum { arg_N4 = 0 }; - enum { arg_N5 = 0 }; - enum { arg_N6 = 0 }; - enum { arg_N7 = sN7 }; - - enum { rank = ( sN7 < 0 ? 
7 : 8 ) }; - enum { rank_dynamic = 7 }; - - unsigned N0 ; - unsigned N1 ; - unsigned N2 ; - unsigned N3 ; - unsigned N4 ; - unsigned N5 ; - unsigned N6 ; - enum { N7 = 0 < sN7 ? sN7 : 1 }; - - ViewDimension() = default ; - ViewDimension( const ViewDimension & ) = default ; - ViewDimension & operator = ( const ViewDimension & ) = default ; - - KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned aN2 , unsigned aN3 - , unsigned aN4 , unsigned aN5 , unsigned aN6 , unsigned ) - : N0( aN0 ) , N1( aN1 ) , N2( aN2 ) , N3( aN3 ) , N4( aN4 ) , N5( aN5 ) , N6( aN6 ) {} -}; +template< size_t ... Args > +struct rank_dynamic ; template<> -struct ViewDimension< 0, 0, 0, 0, 0, 0, 0, 0 > { +struct rank_dynamic<> { enum { value = 0 }; }; - enum { arg_N0 = 0 }; - enum { arg_N1 = 0 }; - enum { arg_N2 = 0 }; - enum { arg_N3 = 0 }; - enum { arg_N4 = 0 }; - enum { arg_N5 = 0 }; - enum { arg_N6 = 0 }; - enum { arg_N7 = 0 }; +template< size_t Val , size_t ... Args > +struct rank_dynamic< Val , Args... > +{ + enum { value = ( Val == 0 ? 1 : 0 ) + rank_dynamic< Args... >::value }; +}; - enum { rank = 8 }; - enum { rank_dynamic = 8 }; +#define KOKKOS_IMPL_VIEW_DIMENSION( R ) \ + template< size_t V , unsigned > struct ViewDimension ## R \ + { \ + enum { ArgN ## R = ( V != ~size_t(0) ? V : 1 ) }; \ + enum { N ## R = ( V != ~size_t(0) ? 
V : 1 ) }; \ + KOKKOS_INLINE_FUNCTION explicit ViewDimension ## R ( size_t ) {} \ + ViewDimension ## R () = default ; \ + ViewDimension ## R ( const ViewDimension ## R & ) = default ; \ + ViewDimension ## R & operator = ( const ViewDimension ## R & ) = default ; \ + }; \ + template< unsigned RD > struct ViewDimension ## R < 0 , RD > \ + { \ + enum { ArgN ## R = 0 }; \ + typename std::conditional<( RD < 3 ), size_t , unsigned >::type N ## R ; \ + ViewDimension ## R () = default ; \ + ViewDimension ## R ( const ViewDimension ## R & ) = default ; \ + ViewDimension ## R & operator = ( const ViewDimension ## R & ) = default ; \ + KOKKOS_INLINE_FUNCTION explicit ViewDimension ## R ( size_t V ) : N ## R ( V ) {} \ + }; - unsigned N0 ; - unsigned N1 ; - unsigned N2 ; - unsigned N3 ; - unsigned N4 ; - unsigned N5 ; - unsigned N6 ; - unsigned N7 ; +KOKKOS_IMPL_VIEW_DIMENSION( 0 ) +KOKKOS_IMPL_VIEW_DIMENSION( 1 ) +KOKKOS_IMPL_VIEW_DIMENSION( 2 ) +KOKKOS_IMPL_VIEW_DIMENSION( 3 ) +KOKKOS_IMPL_VIEW_DIMENSION( 4 ) +KOKKOS_IMPL_VIEW_DIMENSION( 5 ) +KOKKOS_IMPL_VIEW_DIMENSION( 6 ) +KOKKOS_IMPL_VIEW_DIMENSION( 7 ) + +#undef KOKKOS_IMPL_VIEW_DIMENSION + +template< size_t ... Vals > +struct ViewDimension + : public ViewDimension0< variadic_size_t<0,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension1< variadic_size_t<1,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension2< variadic_size_t<2,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension3< variadic_size_t<3,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension4< variadic_size_t<4,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension5< variadic_size_t<5,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension6< variadic_size_t<6,Vals...>::value + , rank_dynamic< Vals... >::value > + , public ViewDimension7< variadic_size_t<7,Vals...>::value + , rank_dynamic< Vals... 
>::value > +{ + typedef ViewDimension0< variadic_size_t<0,Vals...>::value + , rank_dynamic< Vals... >::value > D0 ; + typedef ViewDimension1< variadic_size_t<1,Vals...>::value + , rank_dynamic< Vals... >::value > D1 ; + typedef ViewDimension2< variadic_size_t<2,Vals...>::value + , rank_dynamic< Vals... >::value > D2 ; + typedef ViewDimension3< variadic_size_t<3,Vals...>::value + , rank_dynamic< Vals... >::value > D3 ; + typedef ViewDimension4< variadic_size_t<4,Vals...>::value + , rank_dynamic< Vals... >::value > D4 ; + typedef ViewDimension5< variadic_size_t<5,Vals...>::value + , rank_dynamic< Vals... >::value > D5 ; + typedef ViewDimension6< variadic_size_t<6,Vals...>::value + , rank_dynamic< Vals... >::value > D6 ; + typedef ViewDimension7< variadic_size_t<7,Vals...>::value + , rank_dynamic< Vals... >::value > D7 ; + + using D0::ArgN0 ; + using D1::ArgN1 ; + using D2::ArgN2 ; + using D3::ArgN3 ; + using D4::ArgN4 ; + using D5::ArgN5 ; + using D6::ArgN6 ; + using D7::ArgN7 ; + + using D0::N0 ; + using D1::N1 ; + using D2::N2 ; + using D3::N3 ; + using D4::N4 ; + using D5::N5 ; + using D6::N6 ; + using D7::N7 ; + + enum { rank = sizeof...(Vals) }; + enum { rank_dynamic = Impl::rank_dynamic< Vals... >::value }; ViewDimension() = default ; ViewDimension( const ViewDimension & ) = default ; ViewDimension & operator = ( const ViewDimension & ) = default ; KOKKOS_INLINE_FUNCTION - constexpr ViewDimension( size_t aN0 , unsigned aN1 , unsigned aN2 , unsigned aN3 - , unsigned aN4 , unsigned aN5 , unsigned aN6 , unsigned aN7 ) - : N0( aN0 ) , N1( aN1 ) , N2( aN2 ) , N3( aN3 ) , N4( aN4 ) , N5( aN5 ) , N6( aN6 ) , N7( aN7 ) {} + constexpr + ViewDimension( size_t n0 , size_t n1 , size_t n2 , size_t n3 + , size_t n4 , size_t n5 , size_t n6 , size_t n7 ) + : D0( n0 ) + , D1( n1 ) + , D2( n2 ) + , D3( n3 ) + , D4( n4 ) + , D5( n5 ) + , D6( n6 ) + , D7( n7 ) + {} + + KOKKOS_INLINE_FUNCTION + constexpr size_t extent( const unsigned r ) const + { + return r == 0 ? 
N0 : ( + r == 1 ? N1 : ( + r == 2 ? N2 : ( + r == 3 ? N3 : ( + r == 4 ? N4 : ( + r == 5 ? N5 : ( + r == 6 ? N6 : ( + r == 7 ? N7 : 0 ))))))); + } + + template< size_t N > + struct prepend { typedef ViewDimension< N , Vals... > type ; }; + + template< size_t N > + struct append { typedef ViewDimension< Vals... , N > type ; }; +}; + +template< class A , class B > +struct ViewDimensionJoin ; + +template< size_t ... A , size_t ... B > +struct ViewDimensionJoin< ViewDimension< A... > , ViewDimension< B... > > { + typedef ViewDimension< A... , B... > type ; }; //---------------------------------------------------------------------------- @@ -442,24 +233,381 @@ struct ViewDimension< 0, 0, 0, 0, 0, 0, 0, 0 > { template< class DstDim , class SrcDim > struct ViewDimensionAssignable ; -template< long dN0 , long dN1 , long dN2 , long dN3 , long dN4 , long dN5 , long dN6 , long dN7 - , long sN0 , long sN1 , long sN2 , long sN3 , long sN4 , long sN5 , long sN6 , long sN7 > -struct ViewDimensionAssignable< ViewDimension - , ViewDimension > +template< size_t ... DstArgs , size_t ... SrcArgs > +struct ViewDimensionAssignable< ViewDimension< DstArgs ... > + , ViewDimension< SrcArgs ... > > { - typedef ViewDimension dst ; - typedef ViewDimension src ; + typedef ViewDimension< DstArgs... > dst ; + typedef ViewDimension< SrcArgs... 
> src ; - enum { value = dst::rank == src::rank && - dst::rank_dynamic >= src::rank_dynamic && - ( 0 < dst::rank_dynamic || dN0 == sN0 ) && - ( 1 < dst::rank_dynamic || dN1 == sN1 ) && - ( 2 < dst::rank_dynamic || dN2 == sN2 ) && - ( 3 < dst::rank_dynamic || dN3 == sN3 ) && - ( 4 < dst::rank_dynamic || dN4 == sN4 ) && - ( 5 < dst::rank_dynamic || dN5 == sN5 ) && - ( 6 < dst::rank_dynamic || dN6 == sN6 ) && - ( 7 < dst::rank_dynamic || dN7 == sN7 ) }; + enum { value = + dst::rank == src::rank && + dst::rank_dynamic >= src::rank_dynamic && + ( 0 < dst::rank_dynamic || size_t(dst::ArgN0) == size_t(src::ArgN0) ) && + ( 1 < dst::rank_dynamic || size_t(dst::ArgN1) == size_t(src::ArgN1) ) && + ( 2 < dst::rank_dynamic || size_t(dst::ArgN2) == size_t(src::ArgN2) ) && + ( 3 < dst::rank_dynamic || size_t(dst::ArgN3) == size_t(src::ArgN3) ) && + ( 4 < dst::rank_dynamic || size_t(dst::ArgN4) == size_t(src::ArgN4) ) && + ( 5 < dst::rank_dynamic || size_t(dst::ArgN5) == size_t(src::ArgN5) ) && + ( 6 < dst::rank_dynamic || size_t(dst::ArgN6) == size_t(src::ArgN6) ) && + ( 7 < dst::rank_dynamic || size_t(dst::ArgN7) == size_t(src::ArgN7) ) }; +}; + +}}} // namespace Kokkos::Experimental::Impl + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { +namespace Experimental { +namespace Impl { + +struct ALL_t { + KOKKOS_INLINE_FUNCTION + constexpr const ALL_t & operator()() const { return *this ; } +}; + +template< class T > +struct is_integral_extent_type +{ enum { value = std::is_same::value ? 1 : 0 }; }; + +template< class iType > +struct is_integral_extent_type< std::pair > +{ enum { value = std::is_integral::value ? 1 : 0 }; }; + +template< class iType > +struct is_integral_extent_type< Kokkos::pair > +{ enum { value = std::is_integral::value ? 
1 : 0 }; }; + +// Assuming '2 == initializer_list::size()' +template< class iType > +struct is_integral_extent_type< std::initializer_list > +{ enum { value = std::is_integral::value ? 1 : 0 }; }; + +template < unsigned I , class ... Args > +struct is_integral_extent +{ + // variadic_type is void when sizeof...(Args) <= I + typedef typename std::remove_cv< + typename std::remove_reference< + typename Kokkos::Impl::variadic_type::type >::type >::type type ; + + enum { value = is_integral_extent_type::value }; + + static_assert( value || + std::is_integral::value || + std::is_same::value + , "subview argument must be either integral or integral extent" ); +}; + +template< unsigned DomainRank , unsigned RangeRank > +struct SubviewExtents { +private: + + // Cannot declare zero-length arrays + enum { InternalRangeRank = RangeRank ? RangeRank : 1u }; + + size_t m_begin[ DomainRank ]; + size_t m_length[ InternalRangeRank ]; + unsigned m_index[ InternalRangeRank ]; + + template< size_t ... DimArgs > + KOKKOS_FORCEINLINE_FUNCTION + bool set( unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim ) + { return true ; } + + template< class T , size_t ... DimArgs , class ... Args > + KOKKOS_FORCEINLINE_FUNCTION + bool set( unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const T & val + , Args ... args ) + { + const size_t v = static_cast(val); + + m_begin[ domain_rank ] = v ; + + return set( domain_rank + 1 , range_rank , dim , args... ) +#if defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK ) + && ( v < dim.extent( domain_rank ) ) +#endif + ; + } + + // std::pair range + template< size_t ... DimArgs , class ... Args > + KOKKOS_FORCEINLINE_FUNCTION + bool set( unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const Kokkos::Experimental::Impl::ALL_t + , Args ... 
args ) + { + m_begin[ domain_rank ] = 0 ; + m_length[ range_rank ] = dim.extent( domain_rank ); + m_index[ range_rank ] = domain_rank ; + + return set( domain_rank + 1 , range_rank + 1 , dim , args... ); + } + + // std::pair range + template< class T , size_t ... DimArgs , class ... Args > + KOKKOS_FORCEINLINE_FUNCTION + bool set( unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const std::pair & val + , Args ... args ) + { + const size_t b = static_cast( val.first ); + const size_t e = static_cast( val.second ); + + m_begin[ domain_rank ] = b ; + m_length[ range_rank ] = e - b ; + m_index[ range_rank ] = domain_rank ; + + return set( domain_rank + 1 , range_rank + 1 , dim , args... ) +#if defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK ) + && ( e <= b + dim.extent( domain_rank ) ) +#endif + ; + } + + // Kokkos::pair range + template< class T , size_t ... DimArgs , class ... Args > + KOKKOS_FORCEINLINE_FUNCTION + bool set( unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const Kokkos::pair & val + , Args ... args ) + { + const size_t b = static_cast( val.first ); + const size_t e = static_cast( val.second ); + + m_begin[ domain_rank ] = b ; + m_length[ range_rank ] = e - b ; + m_index[ range_rank ] = domain_rank ; + + return set( domain_rank + 1 , range_rank + 1 , dim , args... ) +#if defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK ) + && ( e <= b + dim.extent( domain_rank ) ) +#endif + ; + } + + // { begin , end } range + template< class T , size_t ... DimArgs , class ... Args > + KOKKOS_FORCEINLINE_FUNCTION + bool set( unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const std::initializer_list< T > & val + , Args ... 
args ) + { + const size_t b = static_cast( val.begin()[0] ); + const size_t e = static_cast( val.begin()[1] ); + + m_begin[ domain_rank ] = b ; + m_length[ range_rank ] = e - b ; + m_index[ range_rank ] = domain_rank ; + + return set( domain_rank + 1 , range_rank + 1 , dim , args... ) +#if defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK ) + && ( val.size() == 2 ) + && ( e <= b + dim.extent( domain_rank ) ) +#endif + ; + } + + //------------------------------ + +#if defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK ) + + template< size_t ... DimArgs > + void error( char * + , int + , unsigned + , unsigned + , const ViewDimension< DimArgs ... > & ) const + {} + + template< class T , size_t ... DimArgs , class ... Args > + void error( char * buf , int buf_len + , unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const T & val + , Args ... args ) const + { + const int n = std::min( buf_len , + snprintf( buf , buf_len + , " %lu < %lu %c" + , static_cast(val) + , static_cast( dim.extent( domain_rank ) ) + , int( sizeof...(Args) ? ',' : ')' ) ) ); + + error( buf+n, buf_len-n, domain_rank + 1 , range_rank , dim , args... ); + } + + // std::pair range + template< size_t ... DimArgs , class ... Args > + void error( char * buf , int buf_len + , unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const Kokkos::Experimental::Impl::ALL_t + , Args ... args ) const + { + const int n = std::min( buf_len , + snprintf( buf , buf_len + , " Kokkos::ALL %c" + , int( sizeof...(Args) ? ',' : ')' ) ) ); + + error( buf+n , buf_len-n , domain_rank + 1 , range_rank + 1 , dim , args... ); + } + + // std::pair range + template< class T , size_t ... DimArgs , class ... Args > + void error( char * buf , int buf_len + , unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const std::pair & val + , Args ... 
args ) const + { + // d <= e - b + const int n = std::min( buf_len , + snprintf( buf , buf_len + , " %lu <= %lu - %lu %c" + , static_cast( dim.extent( domain_rank ) ) + , static_cast( val.second ) + , static_cast( val.begin ) + , int( sizeof...(Args) ? ',' : ')' ) ) ); + + error( buf+n , buf_len-n , domain_rank + 1 , range_rank + 1 , dim , args... ); + } + + // Kokkos::pair range + template< class T , size_t ... DimArgs , class ... Args > + void error( char * buf , int buf_len + , unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const Kokkos::pair & val + , Args ... args ) const + { + // d <= e - b + const int n = std::min( buf_len , + snprintf( buf , buf_len + , " %lu <= %lu - %lu %c" + , static_cast( dim.extent( domain_rank ) ) + , static_cast( val.second ) + , static_cast( val.begin ) + , int( sizeof...(Args) ? ',' : ')' ) ) ); + + error( buf+n , buf_len-n , domain_rank + 1 , range_rank + 1 , dim , args... ); + } + + // { begin , end } range + template< class T , size_t ... DimArgs , class ... Args > + void error( char * buf , int buf_len + , unsigned domain_rank + , unsigned range_rank + , const ViewDimension< DimArgs ... > & dim + , const std::initializer_list< T > & val + , Args ... args ) const + { + // d <= e - b + int n = 0 ; + if ( val.size() == 2 ) { + n = std::min( buf_len , + snprintf( buf , buf_len + , " %lu <= %lu - %lu %c" + , static_cast( dim.extent( domain_rank ) ) + , static_cast( val.begin()[0] ) + , static_cast( val.begin()[1] ) + , int( sizeof...(Args) ? ',' : ')' ) ) ); + } + else { + n = std::min( buf_len , + snprintf( buf , buf_len + , " { ... }.size() == %u %c" + , unsigned(val.size()) + , int( sizeof...(Args) ? ',' : ')' ) ) ); + } + + error( buf+n , buf_len-n , domain_rank + 1 , range_rank + 1 , dim , args... ); + } + + template< size_t ... DimArgs , class ... Args > + void error( const ViewDimension< DimArgs ... > & dim , Args ... 
args ) const + { +#if defined( KOKKOS_ACTIVE_EXECUTION_SPACE_HOST ) + enum { LEN = 1024 }; + char buffer[ LEN ]; + + const int n = snprintf(buffer,LEN,"Kokkos::subview bounds error ("); + error( buffer+n , LEN-n , 0 , 0 , dim , args... ); + + Kokkos::Impl::throw_runtime_exception(std::string(buffer)); +#else + Kokkos::abort("Kokkos::subview bounds error"); +#endif + } + +#else + + template< size_t ... DimArgs , class ... Args > + KOKKOS_FORCEINLINE_FUNCTION + void error( const ViewDimension< DimArgs ... > & , Args ... ) const {} + +#endif + +public: + + template< size_t ... DimArgs , class ... Args > + KOKKOS_INLINE_FUNCTION + SubviewExtents( const ViewDimension< DimArgs ... > & dim , Args ... args ) + { + static_assert( DomainRank == sizeof...(DimArgs) , "" ); + static_assert( DomainRank == sizeof...(Args) , "" ); + + // Verifies that all arguments, up to 8, are integral types, + // integral extents, or don't exist. + static_assert( RangeRank == + unsigned( is_integral_extent<0,Args...>::value ) + + unsigned( is_integral_extent<1,Args...>::value ) + + unsigned( is_integral_extent<2,Args...>::value ) + + unsigned( is_integral_extent<3,Args...>::value ) + + unsigned( is_integral_extent<4,Args...>::value ) + + unsigned( is_integral_extent<5,Args...>::value ) + + unsigned( is_integral_extent<6,Args...>::value ) + + unsigned( is_integral_extent<7,Args...>::value ) , "" ); + + if ( RangeRank == 0 ) { m_length[0] = 0 ; m_index[0] = ~0u ; } + + if ( ! set( 0 , 0 , dim , args... ) ) error( dim , args... ); + } + + template < typename iType > + KOKKOS_FORCEINLINE_FUNCTION + constexpr size_t domain_offset( const iType i ) const + { return unsigned(i) < DomainRank ? m_begin[i] : 0 ; } + + template < typename iType > + KOKKOS_FORCEINLINE_FUNCTION + constexpr size_t range_extent( const iType i ) const + { return unsigned(i) < InternalRangeRank ? 
m_length[i] : 0 ; } + + template < typename iType > + KOKKOS_FORCEINLINE_FUNCTION + constexpr unsigned range_index( const iType i ) const + { return unsigned(i) < InternalRangeRank ? m_index[i] : ~0u ; } }; }}} // namespace Kokkos::Experimental::Impl @@ -472,92 +620,25 @@ namespace Experimental { namespace Impl { /** \brief Given a value type and dimension generate the View data type */ -template< class T , class Dim /* ViewDimension */ > -struct ViewDataType { - enum { R = Dim::rank }; - enum { RD = Dim::rank_dynamic }; +template< class T , class Dim > +struct ViewDataType ; - // Unused static dimensions are set to 1 (instead of 0 or -1L) to avoid compile errors - // in the 'false' clauses of the std::conditional. +template< class T > +struct ViewDataType< T , ViewDimension<> > +{ + typedef T type ; +}; - enum { N0 = 0 < Dim::arg_N0 ? Dim::arg_N0 : 1 }; - enum { N1 = 0 < Dim::arg_N1 ? Dim::arg_N1 : 1 }; - enum { N2 = 0 < Dim::arg_N2 ? Dim::arg_N2 : 1 }; - enum { N3 = 0 < Dim::arg_N3 ? Dim::arg_N3 : 1 }; - enum { N4 = 0 < Dim::arg_N4 ? Dim::arg_N4 : 1 }; - enum { N5 = 0 < Dim::arg_N5 ? Dim::arg_N5 : 1 }; - enum { N6 = 0 < Dim::arg_N6 ? Dim::arg_N6 : 1 }; - enum { N7 = 0 < Dim::arg_N7 ? Dim::arg_N7 : 1 }; +template< class T , size_t ... Args > +struct ViewDataType< T , ViewDimension< 0 , Args... 
> > +{ + typedef typename ViewDataType >::type type ; +}; - typedef typename std::conditional< R == 0 , T , - typename std::conditional< R == 1 , - typename std::conditional< RD == 0 , T[N0] , T * >::type , - - typename std::conditional< R == 2 , - typename std::conditional< RD == 0 , T[N0][N1] , - typename std::conditional< RD == 1 , T* [N1] , - T** - >::type >::type , - - typename std::conditional< R == 3 , - typename std::conditional< RD == 0 , T[N0][N1][N2] , - typename std::conditional< RD == 1 , T* [N1][N2] , - typename std::conditional< RD == 2 , T** [N2] , - T*** - >::type >::type >::type , - - typename std::conditional< R == 4 , - typename std::conditional< RD == 0 , T[N0][N1][N2][N3] , - typename std::conditional< RD == 1 , T* [N1][N2][N3] , - typename std::conditional< RD == 2 , T** [N2][N3] , - typename std::conditional< RD == 3 , T*** [N3] , - T**** - >::type >::type >::type >::type , - - typename std::conditional< R == 5 , - typename std::conditional< RD == 0 , T[N0][N1][N2][N3][N4] , - typename std::conditional< RD == 1 , T* [N1][N2][N3][N4] , - typename std::conditional< RD == 2 , T** [N2][N3][N4] , - typename std::conditional< RD == 3 , T*** [N3][N4] , - typename std::conditional< RD == 4 , T**** [N4] , - T***** - >::type >::type >::type >::type >::type , - - typename std::conditional< R == 6 , - typename std::conditional< RD == 0 , T[N0][N1][N2][N3][N4][N5] , - typename std::conditional< RD == 1 , T* [N1][N2][N3][N4][N5] , - typename std::conditional< RD == 2 , T** [N2][N3][N4][N5] , - typename std::conditional< RD == 3 , T*** [N3][N4][N5] , - typename std::conditional< RD == 4 , T**** [N4][N5] , - typename std::conditional< RD == 5 , T***** [N5] , - T****** - >::type >::type >::type >::type >::type >::type , - - typename std::conditional< R == 7 , - typename std::conditional< RD == 0 , T[N0][N1][N2][N3][N4][N5][N6] , - typename std::conditional< RD == 1 , T* [N1][N2][N3][N4][N5][N6] , - typename std::conditional< RD == 2 , T** 
[N2][N3][N4][N5][N6] , - typename std::conditional< RD == 3 , T*** [N3][N4][N5][N6] , - typename std::conditional< RD == 4 , T**** [N4][N5][N6] , - typename std::conditional< RD == 5 , T***** [N5][N6] , - typename std::conditional< RD == 6 , T****** [N6] , - T******* - >::type >::type >::type >::type >::type >::type >::type , - - typename std::conditional< R == 8 , - typename std::conditional< RD == 0 , T[N0][N1][N2][N3][N4][N5][N6][N7] , - typename std::conditional< RD == 1 , T* [N1][N2][N3][N4][N5][N6][N7] , - typename std::conditional< RD == 2 , T** [N2][N3][N4][N5][N6][N7] , - typename std::conditional< RD == 3 , T*** [N3][N4][N5][N6][N7] , - typename std::conditional< RD == 4 , T**** [N4][N5][N6][N7] , - typename std::conditional< RD == 5 , T***** [N5][N6][N7] , - typename std::conditional< RD == 6 , T****** [N6][N7] , - typename std::conditional< RD == 7 , T******* [N7] , - T******** - >::type >::type >::type >::type >::type >::type >::type >::type , - - void >::type >::type >::type >::type >::type >::type >::type >::type >::type - type ; +template< class T , size_t N , size_t ... Args > +struct ViewDataType< T , ViewDimension< N , Args... > > +{ + typedef typename ViewDataType >::type type[N] ; }; /**\brief Analysis of View data type. @@ -570,103 +651,79 @@ struct ViewDataType { * Provide typedef for the ViewDimension<...> and value_type. */ template< class T > -struct ViewArrayAnalysis +struct ViewArrayAnalysis { -private: - // std::rank, std::extent, and std::remove_all_extents - // consider "const value_type***" to be the type. 
- - // Strip away pointers and count them - typedef typename std::remove_all_extents< T >::type t_0 ; // brackets removed - typedef typename std::remove_pointer< t_0 >::type t_1 ; - typedef typename std::remove_pointer< t_1 >::type t_2 ; - typedef typename std::remove_pointer< t_2 >::type t_3 ; - typedef typename std::remove_pointer< t_3 >::type t_4 ; - typedef typename std::remove_pointer< t_4 >::type t_5 ; - typedef typename std::remove_pointer< t_5 >::type t_6 ; - typedef typename std::remove_pointer< t_6 >::type t_7 ; - typedef typename std::remove_pointer< t_7 >::type t_8 ; - typedef typename std::remove_pointer< t_8 >::type t_9 ; - typedef typename std::remove_pointer< t_9 >::type t_10 ; - - enum { rank_pointer = - ( ! std::is_pointer< t_0 >::value ? 0 : - ( ! std::is_pointer< t_1 >::value ? 1 : - ( ! std::is_pointer< t_2 >::value ? 2 : - ( ! std::is_pointer< t_3 >::value ? 3 : - ( ! std::is_pointer< t_4 >::value ? 4 : - ( ! std::is_pointer< t_5 >::value ? 5 : - ( ! std::is_pointer< t_6 >::value ? 6 : - ( ! std::is_pointer< t_7 >::value ? 7 : - ( ! std::is_pointer< t_8 >::value ? 8 : - ( ! std::is_pointer< t_9 >::value ? 9 : - ( ! std::is_pointer< t_10 >::value ? 10 : 0x7fffffff ))))))))))) }; - - // The pointer-stripped type t_10 may have been an array typedef of the form 'type[#][#]...' - // Append those dimensions. - - enum { rank_bracket = std::rank< T >::value }; - enum { rank_bracket_nested = std::rank< t_10 >::value }; - enum { rank_base = rank_pointer + rank_bracket }; - enum { rank = rank_pointer + rank_bracket + rank_bracket_nested }; - - static_assert( rank <= 10 , "Maximum ten dimensional array" ); - - enum { extent_0 = 0 < rank_base ? std::extent< T , rank_pointer <= 0 ? 0 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 0 ? 0 - rank_base : 10 >::value }; - - enum { extent_1 = 1 < rank_base ? std::extent< T , rank_pointer <= 1 ? 1 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 1 ? 
1 - rank_base : 10 >::value }; - - enum { extent_2 = 2 < rank_base ? std::extent< T , rank_pointer <= 2 ? 2 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 2 ? 2 - rank_base : 10 >::value }; - - enum { extent_3 = 3 < rank_base ? std::extent< T , rank_pointer <= 3 ? 3 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 3 ? 3 - rank_base : 10 >::value }; - - enum { extent_4 = 4 < rank_base ? std::extent< T , rank_pointer <= 4 ? 4 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 4 ? 4 - rank_base : 10 >::value }; - - enum { extent_5 = 5 < rank_base ? std::extent< T , rank_pointer <= 5 ? 5 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 5 ? 5 - rank_base : 10 >::value }; - - enum { extent_6 = 6 < rank_base ? std::extent< T , rank_pointer <= 6 ? 6 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 6 ? 6 - rank_base : 10 >::value }; - - enum { extent_7 = 7 < rank_base ? std::extent< T , rank_pointer <= 7 ? 7 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 7 ? 7 - rank_base : 10 >::value }; - - enum { extent_8 = 8 < rank_base ? std::extent< T , rank_pointer <= 8 ? 8 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 8 ? 8 - rank_base : 10 >::value }; - - enum { extent_9 = 9 < rank_base ? std::extent< T , rank_pointer <= 9 ? 9 - rank_pointer : 10 >::value - : std::extent< t_10 , rank_base <= 9 ? 9 - rank_base : 10 >::value }; - - typedef typename std::remove_all_extents< t_10 >::type base_type ; - - enum { rank_dynamic = rank_pointer ? rank_pointer : ( ( rank_bracket && extent_0 == 0 ) ? 1 : 0 ) }; - -public: - - typedef ViewDimension< ( rank <= 0 ? -1L : extent_0 ) - , ( rank <= 1 ? -1L : extent_1 ) - , ( rank <= 2 ? -1L : extent_2 ) - , ( rank <= 3 ? -1L : extent_3 ) - , ( rank <= 4 ? -1L : extent_4 ) - , ( rank <= 5 ? -1L : extent_5 ) - , ( rank <= 6 ? -1L : extent_6 ) - , ( rank <= 7 ? 
-1L : extent_7 ) - > dimension ; - - typedef base_type value_type ; - typedef typename std::add_const< base_type >::type const_value_type ; - typedef typename std::remove_const< base_type >::type non_const_value_type ; - - static_assert( unsigned(dimension::rank) == unsigned(rank) , "" ); - static_assert( unsigned(dimension::rank_dynamic) == unsigned(rank_dynamic) , "" ); + typedef T value_type ; + typedef typename std::add_const< T >::type const_value_type ; + typedef typename std::remove_const< T >::type non_const_value_type ; + typedef ViewDimension<> static_dimension ; + typedef ViewDimension<> dynamic_dimension ; + typedef ViewDimension<> dimension ; }; -template< class DataType , class ValueType , class ArrayLayout > +template< class T , size_t N > +struct ViewArrayAnalysis< T[N] > +{ +private: + typedef ViewArrayAnalysis< T > nested ; +public: + typedef typename nested::value_type value_type ; + typedef typename nested::const_value_type const_value_type ; + typedef typename nested::non_const_value_type non_const_value_type ; + + typedef typename nested::static_dimension::template prepend::type + static_dimension ; + + typedef typename nested::dynamic_dimension dynamic_dimension ; + + typedef typename + ViewDimensionJoin< dynamic_dimension , static_dimension >::type + dimension ; +}; + +template< class T > +struct ViewArrayAnalysis< T[] > +{ +private: + typedef ViewArrayAnalysis< T > nested ; + typedef typename nested::dimension nested_dimension ; +public: + typedef typename nested::value_type value_type ; + typedef typename nested::const_value_type const_value_type ; + typedef typename nested::non_const_value_type non_const_value_type ; + + typedef typename nested::dynamic_dimension::template prepend<0>::type + dynamic_dimension ; + + typedef typename nested::static_dimension static_dimension ; + + typedef typename + ViewDimensionJoin< dynamic_dimension , static_dimension >::type + dimension ; +}; + +template< class T > +struct ViewArrayAnalysis< T* > +{ 
+private: + typedef ViewArrayAnalysis< T > nested ; +public: + typedef typename nested::value_type value_type ; + typedef typename nested::const_value_type const_value_type ; + typedef typename nested::non_const_value_type non_const_value_type ; + + typedef typename nested::dynamic_dimension::template prepend<0>::type + dynamic_dimension ; + + typedef typename nested::static_dimension static_dimension ; + + typedef typename + ViewDimensionJoin< dynamic_dimension , static_dimension >::type + dimension ; +}; + + +template< class DataType , class ArrayLayout , class ValueType > struct ViewDataAnalysis { private: @@ -917,17 +974,10 @@ struct ViewOffset< Dimension , Kokkos::LayoutLeft template< class DimRHS > KOKKOS_INLINE_FUNCTION - constexpr ViewOffset( const ViewOffset< DimRHS , Kokkos::LayoutLeft , void > & rhs - , const size_t n0 - , const size_t - , const size_t - , const size_t - , const size_t - , const size_t - , const size_t - , const size_t - ) - : m_dim( n0, 0, 0, 0, 0, 0, 0, 0 ) + constexpr ViewOffset( + const ViewOffset< DimRHS , Kokkos::LayoutLeft , void > & rhs , + const SubviewExtents< DimRHS::rank , dimension_type::rank > & sub ) + : m_dim( sub.range_extent(0), 0, 0, 0, 0, 0, 0, 0 ) { static_assert( ( 0 == dimension_type::rank ) || ( 1 == dimension_type::rank && 1 == dimension_type::rank_dynamic && 1 <= DimRHS::rank ) @@ -1142,41 +1192,27 @@ public: //---------------------------------------- // Subview construction + // This subview must be 2 == rank and 2 == rank_dynamic + // due to only having stride #0. + // The source dimension #0 must be non-zero for stride-one leading dimension. + // At most subsequent dimension can be non-zero. 
template< class DimRHS > KOKKOS_INLINE_FUNCTION - constexpr ViewOffset( const ViewOffset< DimRHS , Kokkos::LayoutLeft , void > & rhs - , const size_t aN0 - , const size_t aN1 - , const size_t aN2 - , const size_t aN3 - , const size_t aN4 - , const size_t aN5 - , const size_t aN6 - , const size_t aN7 - ) - : m_dim( aN0 - , ( 1 < DimRHS::rank && aN1 ? aN1 : - ( 2 < DimRHS::rank && aN2 ? aN2 : - ( 3 < DimRHS::rank && aN3 ? aN3 : - ( 4 < DimRHS::rank && aN4 ? aN4 : - ( 5 < DimRHS::rank && aN5 ? aN5 : - ( 6 < DimRHS::rank && aN6 ? aN6 : - ( 7 < DimRHS::rank && aN7 ? aN7 : 0 ))))))) + constexpr ViewOffset + ( const ViewOffset< DimRHS , Kokkos::LayoutLeft , void > & rhs , + const SubviewExtents< DimRHS::rank , dimension_type::rank > & sub ) + : m_dim( sub.range_extent(0) + , sub.range_extent(1) , 0, 0, 0, 0, 0, 0 ) - , m_stride( ( 1 < DimRHS::rank && aN1 ? rhs.stride_1() : - ( 2 < DimRHS::rank && aN2 ? rhs.stride_2() : - ( 3 < DimRHS::rank && aN3 ? rhs.stride_3() : - ( 4 < DimRHS::rank && aN4 ? rhs.stride_4() : - ( 5 < DimRHS::rank && aN5 ? rhs.stride_5() : - ( 6 < DimRHS::rank && aN6 ? rhs.stride_6() : - ( 7 < DimRHS::rank && aN7 ? rhs.stride_7() : 0 ))))))) ) + , m_stride( ( 1 == sub.range_index(1) ? rhs.stride_1() : + ( 2 == sub.range_index(1) ? rhs.stride_2() : + ( 3 == sub.range_index(1) ? rhs.stride_3() : + ( 4 == sub.range_index(1) ? rhs.stride_4() : + ( 5 == sub.range_index(1) ? rhs.stride_5() : + ( 6 == sub.range_index(1) ? rhs.stride_6() : + ( 7 == sub.range_index(1) ? rhs.stride_7() : 0 )))))))) { - // This subview must be 2 == rank and 2 == rank_dynamic - // due to only having stride #0. - // The source dimension #0 must be non-zero for stride-one leading dimension. - // At most subsequent dimension can be non-zero. 
- static_assert( ( 2 == dimension_type::rank ) && ( 2 == dimension_type::rank_dynamic ) && ( 2 <= DimRHS::rank ) @@ -1391,17 +1427,11 @@ struct ViewOffset< Dimension , Kokkos::LayoutRight template< class DimRHS > KOKKOS_INLINE_FUNCTION - constexpr ViewOffset( const ViewOffset< DimRHS , Kokkos::LayoutRight , void > & rhs - , const size_t n0 - , const size_t - , const size_t - , const size_t - , const size_t - , const size_t - , const size_t - , const size_t - ) - : m_dim( n0, 0, 0, 0, 0, 0, 0, 0 ) + constexpr ViewOffset + ( const ViewOffset< DimRHS , Kokkos::LayoutRight , void > & rhs + , const SubviewExtents< DimRHS::rank , dimension_type::rank > & sub + ) + : m_dim( sub.range_extent(0) , 0, 0, 0, 0, 0, 0, 0 ) { static_assert( ( 0 == dimension_type::rank ) || ( 1 == dimension_type::rank && 1 == dimension_type::rank_dynamic && 1 <= DimRHS::rank ) @@ -1627,39 +1657,20 @@ public: template< class DimRHS > KOKKOS_INLINE_FUNCTION - constexpr ViewOffset( const ViewOffset< DimRHS , Kokkos::LayoutRight , void > & rhs - , const size_t aN0 - , const size_t aN1 - , const size_t aN2 - , const size_t aN3 - , const size_t aN4 - , const size_t aN5 - , const size_t aN6 - , const size_t aN7 - ) - : m_dim( // N0 == First non-zero dimension before the last dimension. - ( 1 < DimRHS::rank && aN0 ? aN0 : - ( 2 < DimRHS::rank && aN1 ? aN1 : - ( 3 < DimRHS::rank && aN2 ? aN2 : - ( 4 < DimRHS::rank && aN3 ? aN3 : - ( 5 < DimRHS::rank && aN4 ? aN4 : - ( 6 < DimRHS::rank && aN5 ? aN5 : - ( 7 < DimRHS::rank && aN6 ? aN6 : 0 ))))))) - , // N1 == Last dimension. - ( 2 == DimRHS::rank ? aN1 : - ( 3 == DimRHS::rank ? aN2 : - ( 4 == DimRHS::rank ? aN3 : - ( 5 == DimRHS::rank ? aN4 : - ( 6 == DimRHS::rank ? aN5 : - ( 7 == DimRHS::rank ? 
aN6 : aN7 )))))) + constexpr ViewOffset + ( const ViewOffset< DimRHS , Kokkos::LayoutRight , void > & rhs + , const SubviewExtents< DimRHS::rank , dimension_type::rank > & sub + ) + : m_dim( sub.range_extent(0) + , sub.range_extent(1) , 0, 0, 0, 0, 0, 0 ) - , m_stride( ( 1 < DimRHS::rank && aN0 ? rhs.stride_0() : - ( 2 < DimRHS::rank && aN1 ? rhs.stride_1() : - ( 3 < DimRHS::rank && aN2 ? rhs.stride_2() : - ( 4 < DimRHS::rank && aN3 ? rhs.stride_3() : - ( 5 < DimRHS::rank && aN4 ? rhs.stride_4() : - ( 6 < DimRHS::rank && aN5 ? rhs.stride_5() : - ( 7 < DimRHS::rank && aN6 ? rhs.stride_6() : 0 ))))))) ) + , m_stride( 0 == sub.range_index(0) ? rhs.stride_0() : ( + 1 == sub.range_index(0) ? rhs.stride_1() : ( + 2 == sub.range_index(0) ? rhs.stride_2() : ( + 3 == sub.range_index(0) ? rhs.stride_3() : ( + 4 == sub.range_index(0) ? rhs.stride_4() : ( + 5 == sub.range_index(0) ? rhs.stride_5() : ( + 6 == sub.range_index(0) ? rhs.stride_6() : 0 ))))))) { // This subview must be 2 == rank and 2 == rank_dynamic // due to only having stride #0. @@ -2027,198 +2038,50 @@ public: private: - KOKKOS_INLINE_FUNCTION - static constexpr unsigned - count_non_zero( const size_t aN0 = 0 - , const size_t aN1 = 0 - , const size_t aN2 = 0 - , const size_t aN3 = 0 - , const size_t aN4 = 0 - , const size_t aN5 = 0 - , const size_t aN6 = 0 - , const size_t aN7 = 0 - ) + template< class DimRHS , class LayoutRHS > + KOKKOS_INLINE_FUNCTION static + constexpr size_t stride + ( unsigned r , const ViewOffset< DimRHS , LayoutRHS , void > & rhs ) { - return ( aN0 ? 1 : 0 ) + - ( aN1 ? 1 : 0 ) + - ( aN2 ? 1 : 0 ) + - ( aN3 ? 1 : 0 ) + - ( aN4 ? 1 : 0 ) + - ( aN5 ? 1 : 0 ) + - ( aN6 ? 1 : 0 ) + - ( aN7 ? 1 : 0 ); + return r > 7 ? 0 : ( + r == 0 ? rhs.stride_0() : ( + r == 1 ? rhs.stride_1() : ( + r == 2 ? rhs.stride_2() : ( + r == 3 ? rhs.stride_3() : ( + r == 4 ? rhs.stride_4() : ( + r == 5 ? rhs.stride_5() : ( + r == 6 ? 
rhs.stride_6() : rhs.stride_7() ))))))); } - template< unsigned Rank , unsigned I > - KOKKOS_INLINE_FUNCTION - static constexpr size_t - get_non_zero( const size_t aN0 - , const size_t aN1 - , const size_t aN2 - , const size_t aN3 - , const size_t aN4 - , const size_t aN5 - , const size_t aN6 - , const size_t aN7 - ) - { - return ( 0 < Rank && I < 1 && aN0 ? aN0 : - ( 1 < Rank && I < 2 && I == count_non_zero(aN0) && aN1 ? aN1 : - ( 2 < Rank && I < 3 && I == count_non_zero(aN0,aN1) && aN2 ? aN2 : - ( 3 < Rank && I < 4 && I == count_non_zero(aN0,aN1,aN2) && aN3 ? aN3 : - ( 4 < Rank && I < 5 && I == count_non_zero(aN0,aN1,aN2,aN3) && aN4 ? aN4 : - ( 5 < Rank && I < 6 && I == count_non_zero(aN0,aN1,aN2,aN3,aN4) && aN5 ? aN5 : - ( 6 < Rank && I < 7 && I == count_non_zero(aN0,aN1,aN2,aN3,aN4,aN5) && aN6 ? aN6 : - ( 7 < Rank && I < 8 && I == count_non_zero(aN0,aN1,aN2,aN3,aN4,aN5,aN6) && aN7 ? aN7 : 0 )))))))); - } - - template< unsigned Rank , unsigned I , class DimRHS , class LayoutRHS > - KOKKOS_INLINE_FUNCTION - static constexpr size_t - get_non_zero( const size_t aN0 , const size_t aN1 , const size_t aN2 , const size_t aN3 - , const size_t aN4 , const size_t aN5 , const size_t aN6 , const size_t aN7 - , const ViewOffset< DimRHS , LayoutRHS , void > & rhs ) - { - return ( 0 < Rank && I < 1 && aN0 ? rhs.stride_0() : - ( 1 < Rank && I < 2 && I == count_non_zero(aN0) && aN1 ? rhs.stride_1() : - ( 2 < Rank && I < 3 && I == count_non_zero(aN0,aN1) && aN2 ? rhs.stride_2() : - ( 3 < Rank && I < 4 && I == count_non_zero(aN0,aN1,aN2) && aN3 ? rhs.stride_3() : - ( 4 < Rank && I < 5 && I == count_non_zero(aN0,aN1,aN2,aN3) && aN4 ? rhs.stride_4() : - ( 5 < Rank && I < 6 && I == count_non_zero(aN0,aN1,aN2,aN3,aN4) && aN5 ? rhs.stride_5() : - ( 6 < Rank && I < 7 && I == count_non_zero(aN0,aN1,aN2,aN3,aN4,aN5) && aN6 ? rhs.stride_6() : - ( 7 < Rank && I < 8 && I == count_non_zero(aN0,aN1,aN2,aN3,aN4,aN5,aN6) && aN7 ? 
rhs.stride_7() : 0 )))))))); - } - - public: template< class DimRHS , class LayoutRHS > KOKKOS_INLINE_FUNCTION - constexpr ViewOffset( const ViewOffset< DimRHS , LayoutRHS , void > & rhs - , const size_t aN0 - , const size_t aN1 - , const size_t aN2 - , const size_t aN3 - , const size_t aN4 - , const size_t aN5 - , const size_t aN6 - , const size_t aN7 - ) - // Contract the non-zero dimensions - : m_dim( ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7 ) + constexpr ViewOffset + ( const ViewOffset< DimRHS , LayoutRHS , void > & rhs + , const SubviewExtents< DimRHS::rank , dimension_type::rank > & sub + ) + // range_extent(r) returns 0 when dimension_type::rank <= r + : m_dim( sub.range_extent(0) + , sub.range_extent(1) + , sub.range_extent(2) + , sub.range_extent(3) + , sub.range_extent(4) + , sub.range_extent(5) + , sub.range_extent(6) + , sub.range_extent(7) ) - , m_stride( ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , 
ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) - , ViewOffset::template get_non_zero( aN0, aN1, aN2, aN3, aN4, aN5, aN6, aN7, rhs ) + // range_index(r) returns ~0u when dimension_type::rank <= r + , m_stride( stride( sub.range_index(0), rhs ) + , stride( sub.range_index(1), rhs ) + , stride( sub.range_index(2), rhs ) + , stride( sub.range_index(3), rhs ) + , stride( sub.range_index(4), rhs ) + , stride( sub.range_index(5), rhs ) + , stride( sub.range_index(6), rhs ) + , stride( sub.range_index(7), rhs ) ) - { - } - - //---------------------------------------- -}; - -}}} // namespace Kokkos::Experimental::Impl - -//---------------------------------------------------------------------------- -//---------------------------------------------------------------------------- - -namespace Kokkos { -namespace Experimental { -namespace Impl { - -struct ALL_t { - KOKKOS_INLINE_FUNCTION - constexpr const ALL_t & operator()() const { return *this ; } -}; - -template< class T > -struct ViewOffsetRange { - - static_assert( std::is_integral::value , "Non-range must be an integral type" ); - - enum { is_range = false }; - - KOKKOS_INLINE_FUNCTION static - size_t dimension( size_t const , T const & ) { return 0 ; } - - KOKKOS_INLINE_FUNCTION static - size_t begin( T const & i ) { return size_t(i) ; } -}; - -template<> -struct ViewOffsetRange { - enum { is_range = false }; -}; - -template<> -struct ViewOffsetRange< Kokkos::Experimental::Impl::ALL_t > { - enum { is_range = true }; - - KOKKOS_INLINE_FUNCTION static - size_t dimension( size_t const n , Experimental::Impl::ALL_t const & ) { return n ; } - - KOKKOS_INLINE_FUNCTION static - size_t begin( Experimental::Impl::ALL_t const & ) { return 0 ; } -}; - -template< typename iType > -struct ViewOffsetRange< std::pair > { - - static_assert( std::is_integral::value , "Range bounds must be an integral type" ); - - enum { is_range = true }; - - KOKKOS_INLINE_FUNCTION static - size_t dimension( size_t 
const n , std::pair const & r ) - { return ( size_t(r.first) < size_t(r.second) && size_t(r.second) <= n ) ? size_t(r.second) - size_t(r.first) : 0 ; } - - KOKKOS_INLINE_FUNCTION static - size_t begin( std::pair const & r ) { return size_t(r.first) ; } -}; - -template< typename iType > -struct ViewOffsetRange< Kokkos::pair > { - - static_assert( std::is_integral::value , "Range bounds must be an integral type" ); - - enum { is_range = true }; - - KOKKOS_INLINE_FUNCTION static - size_t dimension( size_t const n , Kokkos::pair const & r ) - { return ( size_t(r.first) < size_t(r.second) && size_t(r.second) <= n ) ? size_t(r.second) - size_t(r.first) : 0 ; } - - KOKKOS_INLINE_FUNCTION static - size_t begin( Kokkos::pair const & r ) { return size_t(r.first) ; } -}; - -template< typename iType > -struct ViewOffsetRange< std::initializer_list< iType > > { - - static_assert( std::is_integral::value , "Range bounds must be an integral type" ); - - enum { is_range = true }; - - KOKKOS_INLINE_FUNCTION static - size_t dimension( size_t const n , std::initializer_list< iType > const & r ) - { - return ( size_t(r.begin()[0]) < size_t(r.begin()[1]) && size_t(r.begin()[1]) <= n ) - ? 
size_t(r.begin()[1]) - size_t(r.begin()[0]) : 0 ; - } - - KOKKOS_INLINE_FUNCTION static - size_t begin( std::initializer_list< iType > const & r ) { return size_t(r.begin()[0]) ; } + {} }; }}} // namespace Kokkos::Experimental::Impl @@ -2290,21 +2153,93 @@ namespace Kokkos { namespace Experimental { namespace Impl { -template< class Traits - , bool R0 = false - , bool R1 = false - , bool R2 = false - , bool R3 = false - , bool R4 = false - , bool R5 = false - , bool R6 = false - , bool R7 = false - , typename Enable = void > -struct SubviewMapping ; +//---------------------------------------------------------------------------- +template< class ValueType , class ExecSpace + , bool IsScalar = std::is_scalar< ValueType >::value > +struct ViewValueFunctor ; + +/* + * The construction, assignment to default, and destruction + * are merged into a single functor. + * Primarily to work around an unresolved CUDA back-end bug + * that would lose the destruction cuda device function when + * called from the shared memory tracking destruction. + * Secondarily to have two fewer partial specializations. + */ +template< class ValueType , class ExecSpace > +struct ViewValueFunctor< ValueType , ExecSpace , false > +{ + enum { CONSTRUCT = 0x01 , ASSIGN = 0x02 , DESTROY = 0x04 }; + + ValueType * const ptr ; + int const mode ; + + KOKKOS_INLINE_FUNCTION + void operator()( size_t i ) const + { + if ( mode == CONSTRUCT ) { new (ptr+i) ValueType(); } + else if ( mode == ASSIGN ) { ptr[i] = ValueType(); } + else if ( mode == DESTROY ) { (ptr+i)->~ValueType(); } + } + + ViewValueFunctor( const ExecSpace & arg_space + , ValueType * const arg_ptr + , size_t const arg_n + , int const arg_mode ) + : ptr( arg_ptr ) + , mode( arg_mode ) + { + if ( ! 
arg_space.in_parallel() ) { + typedef Kokkos::RangePolicy< ExecSpace > PolicyType ; + const Kokkos::Impl::ParallelFor< ViewValueFunctor , PolicyType > + closure( *this , PolicyType( 0 , arg_n ) ); + closure.execute(); + arg_space.fence(); + } + else { + for ( size_t i = 0 ; i < arg_n ; ++i ) operator()(i); + } + } +}; + +template< class ValueType , class ExecSpace > +struct ViewValueFunctor< ValueType , ExecSpace , true > +{ + enum { CONSTRUCT = 0x01 , ASSIGN = 0x02 , DESTROY = 0x04 }; + + ValueType * const ptr ; + int const mode ; + + KOKKOS_INLINE_FUNCTION + void operator()( size_t i ) const { ptr[i] = 0 ; } + + ViewValueFunctor( const ExecSpace & arg_space + , ValueType * const arg_ptr + , size_t const arg_n + , int const arg_mode ) + : ptr( arg_ptr ) + , mode( arg_mode ) + { + if ( mode == CONSTRUCT || mode == ASSIGN ) { + if ( ! arg_space.in_parallel() ) { + typedef Kokkos::RangePolicy< ExecSpace > PolicyType ; + const Kokkos::Impl::ParallelFor< ViewValueFunctor , PolicyType > + closure( *this , PolicyType( 0 , arg_n ) ); + closure.execute(); + arg_space.fence(); + } + else { + for ( size_t i = 0 ; i < arg_n ; ++i ) operator()(i); + } + } + } +}; + +//---------------------------------------------------------------------------- /** \brief View mapping for non-specialized data type and standard layout */ template< class Traits > -class ViewMapping< Traits , void , +class ViewMapping< Traits , typename std::enable_if<( std::is_same< typename Traits::specialize , void >::value && @@ -2315,9 +2250,8 @@ class ViewMapping< Traits , void , { private: - template< class , class , typename > friend class ViewMapping ; - template< class , bool , bool , bool , bool , bool , bool , bool , bool , class > friend struct SubviewMapping ; - template< class , class , class , class > friend class Kokkos::Experimental::View ; + template< class , class ... > friend class ViewMapping ; + template< class , class ... 
> friend class Kokkos::Experimental::View ; typedef ViewOffset< typename Traits::dimension , typename Traits::array_layout @@ -2363,6 +2297,9 @@ public: KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const { return m_offset.stride_6(); } KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const { return m_offset.stride_7(); } + template< typename iType > + KOKKOS_INLINE_FUNCTION void stride( iType * const s ) const { m_offset.stride(s); } + //---------------------------------------- // Range span @@ -2373,15 +2310,14 @@ public: KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const { return m_offset.span_is_contiguous(); } typedef typename ViewDataHandle< Traits >::return_type reference_type ; + typedef typename Traits::value_type * pointer_type ; /** \brief If data references are lvalue_reference than can query pointer to memory */ - KOKKOS_INLINE_FUNCTION constexpr typename Traits::value_type * data() const + KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const { - typedef typename Traits::value_type * ptr_type ; - return std::is_lvalue_reference< reference_type >::value - ? (ptr_type) m_handle - : (ptr_type) 0 ; + ? (pointer_type) m_handle + : (pointer_type) 0 ; } //---------------------------------------- @@ -2501,85 +2437,43 @@ public: template< bool AllowPadding > KOKKOS_INLINE_FUNCTION - ViewMapping( void * ptr + ViewMapping( pointer_type ptr , const std::integral_constant & , const size_t N0 , const size_t N1 , const size_t N2 , const size_t N3 , const size_t N4 , const size_t N5 , const size_t N6 , const size_t N7 ) - : m_handle( reinterpret_cast< handle_type >( ptr ) ) + : m_handle( ptr ) , m_offset( std::integral_constant< unsigned , AllowPadding ? 
sizeof(typename Traits::value_type) : 0 >() , N0, N1, N2, N3, N4, N5, N6, N7 ) {} template< bool AllowPadding > KOKKOS_INLINE_FUNCTION - ViewMapping( void * ptr + ViewMapping( pointer_type ptr , const std::integral_constant & , const typename Traits::array_layout & layout ) - : m_handle( reinterpret_cast< handle_type >( ptr ) ) + : m_handle( ptr ) , m_offset( layout ) {} //---------------------------------------- // If the View is to construct or destroy the elements. - struct FunctorTagConstructScalar {}; - struct FunctorTagConstructNonScalar {}; - struct FunctorTagDestructNonScalar {}; - - KOKKOS_FORCEINLINE_FUNCTION - void operator()( const FunctorTagConstructScalar & , const size_t i ) const - { m_handle[i] = 0 ; } - - KOKKOS_FORCEINLINE_FUNCTION - void operator()( const FunctorTagConstructNonScalar & , const size_t i ) const - { - typedef typename Traits::value_type value_type ; - new( & m_handle[i] ) value_type(); - } - - KOKKOS_FORCEINLINE_FUNCTION - void operator()( const FunctorTagDestructNonScalar & , const size_t i ) const - { - typedef typename Traits::value_type value_type ; - ( & (m_handle[i]) )->~value_type(); - } - template< class ExecSpace > - typename std::enable_if< Kokkos::Impl::is_execution_space::value && - std::is_scalar< typename Traits::value_type >::value >::type - construct( const ExecSpace & space ) const + void construct( const ExecSpace & space ) const { - typedef Kokkos::RangePolicy< ExecSpace , FunctorTagConstructScalar , size_t > Policy ; + typedef typename Traits::value_type value_type ; + typedef ViewValueFunctor< value_type , ExecSpace > FunctorType ; - (void) Kokkos::Impl::ParallelFor< ViewMapping , Policy >( *this , Policy( 0 , m_offset.span() ) ); - ExecSpace::fence(); + (void) FunctorType( space , (value_type *) m_handle , m_offset.span() , FunctorType::CONSTRUCT ); } template< class ExecSpace > - typename std::enable_if< Kokkos::Impl::is_execution_space::value && - ! 
std::is_scalar< typename Traits::value_type >::value >::type - construct( const ExecSpace & space ) const + void destroy( const ExecSpace & space ) const { - typedef Kokkos::RangePolicy< ExecSpace , FunctorTagConstructNonScalar , size_t > Policy ; + typedef typename Traits::value_type value_type ; + typedef ViewValueFunctor< value_type , ExecSpace > FunctorType ; - (void) Kokkos::Impl::ParallelFor< ViewMapping , Policy >( *this , Policy( 0 , m_offset.span() ) ); - ExecSpace::fence(); - } - - template< class ExecSpace > - typename std::enable_if< Kokkos::Impl::is_execution_space::value && - std::is_scalar< typename Traits::value_type >::value >::type - destroy( const ExecSpace & ) const {} - - template< class ExecSpace > - typename std::enable_if< Kokkos::Impl::is_execution_space::value && - ! std::is_scalar< typename Traits::value_type >::value >::type - destroy( const ExecSpace & space ) const - { - typedef Kokkos::RangePolicy< ExecSpace , FunctorTagDestructNonScalar , size_t > Policy ; - - (void) Kokkos::Impl::ParallelFor< ViewMapping , Policy >( *this , Policy( 0 , m_offset.span() ) ); - ExecSpace::fence(); + (void) FunctorType( space , (value_type *) m_handle , m_offset.span() , FunctorType::DESTROY ); } }; @@ -2609,28 +2503,48 @@ class ViewMapping< DstTraits , SrcTraits , ) )>::type > { +private: + + enum { is_assignable_value_type = + std::is_same< typename DstTraits::value_type + , typename SrcTraits::value_type >::value || + std::is_same< typename DstTraits::value_type + , typename SrcTraits::const_value_type >::value }; + + enum { is_assignable_dimension = + ViewDimensionAssignable< typename DstTraits::dimension + , typename SrcTraits::dimension >::value }; + + enum { is_assignable_layout = + std::is_same< typename DstTraits::array_layout + , typename SrcTraits::array_layout >::value || + std::is_same< typename DstTraits::array_layout + , Kokkos::LayoutStride >::value || + ( DstTraits::dimension::rank == 0 ) || + ( DstTraits::dimension::rank == 1 && + 
DstTraits::dimension::rank_dynamic == 1 ) + }; + public: - enum { is_assignable = true }; + enum { is_assignable = is_assignable_value_type && + is_assignable_dimension && + is_assignable_layout }; typedef Kokkos::Experimental::Impl::SharedAllocationTracker TrackType ; - typedef ViewMapping< DstTraits , void , void > DstType ; - typedef ViewMapping< SrcTraits , void , void > SrcType ; + typedef ViewMapping< DstTraits , void > DstType ; + typedef ViewMapping< SrcTraits , void > SrcType ; KOKKOS_INLINE_FUNCTION static void assign( DstType & dst , const SrcType & src , const TrackType & src_track ) { - static_assert( std::is_same< typename DstTraits::value_type , typename SrcTraits::value_type >::value || - std::is_same< typename DstTraits::value_type , typename SrcTraits::const_value_type >::value + static_assert( is_assignable_value_type , "View assignment must have same value type or const = non-const" ); - static_assert( ViewDimensionAssignable< typename DstTraits::dimension , typename SrcTraits::dimension >::value + static_assert( is_assignable_dimension , "View assignment must have compatible dimensions" ); - static_assert( std::is_same< typename DstTraits::array_layout , typename SrcTraits::array_layout >::value || - std::is_same< typename DstTraits::array_layout , Kokkos::LayoutStride >::value || - ( DstTraits::dimension::rank == 0 ) || - ( DstTraits::dimension::rank == 1 && DstTraits::dimension::rank_dynamic == 1 ) + static_assert( is_assignable_layout , "View assignment must have compatible layout or have rank <= 1" ); typedef typename DstType::offset_type dst_offset_type ; @@ -2642,35 +2556,55 @@ public: //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- +// Subview mapping. 
+// Deduce destination view type from source view traits and subview arguments -/** \brief View mapping for non-specialized data type and standard layout */ -template< class Traits , bool R0 , bool R1 , bool R2 , bool R3 , bool R4 , bool R5 , bool R6 , bool R7 > -struct SubviewMapping< Traits, R0, R1, R2, R3, R4, R5, R6, R7 , - typename std::enable_if<( - std::is_same< typename Traits::specialize , void >::value - && - ( - std::is_same< typename Traits::array_layout , Kokkos::LayoutLeft >::value || - std::is_same< typename Traits::array_layout , Kokkos::LayoutRight >::value || - std::is_same< typename Traits::array_layout , Kokkos::LayoutStride >::value - ) - )>::type > +template< class SrcTraits , class ... Args > +struct ViewMapping + < typename std::enable_if<( + std::is_same< typename SrcTraits::specialize , void >::value + && + ( + std::is_same< typename SrcTraits::array_layout + , Kokkos::LayoutLeft >::value || + std::is_same< typename SrcTraits::array_layout + , Kokkos::LayoutRight >::value || + std::is_same< typename SrcTraits::array_layout + , Kokkos::LayoutStride >::value + ) + )>::type + , SrcTraits + , Args ... > { private: - // Subview's rank + static_assert( SrcTraits::rank == sizeof...(Args) , + "Subview mapping requires one argument for each dimension of source View" ); + + enum + { RZ = false + , R0 = bool(is_integral_extent<0,Args...>::value) + , R1 = bool(is_integral_extent<1,Args...>::value) + , R2 = bool(is_integral_extent<2,Args...>::value) + , R3 = bool(is_integral_extent<3,Args...>::value) + , R4 = bool(is_integral_extent<4,Args...>::value) + , R5 = bool(is_integral_extent<5,Args...>::value) + , R6 = bool(is_integral_extent<6,Args...>::value) + , R7 = bool(is_integral_extent<7,Args...>::value) + }; + enum { rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) + unsigned(R4) + unsigned(R5) + unsigned(R6) + unsigned(R7) }; // Whether right-most rank is a range. - enum { R0_rev = 0 == Traits::rank ? false : ( - 1 == Traits::rank ? 
R0 : ( - 2 == Traits::rank ? R1 : ( - 3 == Traits::rank ? R2 : ( - 4 == Traits::rank ? R3 : ( - 5 == Traits::rank ? R4 : ( - 6 == Traits::rank ? R5 : ( - 7 == Traits::rank ? R6 : R7 ))))))) }; + enum { R0_rev = ( 0 == SrcTraits::rank ? RZ : ( + 1 == SrcTraits::rank ? R0 : ( + 2 == SrcTraits::rank ? R1 : ( + 3 == SrcTraits::rank ? R2 : ( + 4 == SrcTraits::rank ? R3 : ( + 5 == SrcTraits::rank ? R4 : ( + 6 == SrcTraits::rank ? R5 : ( + 7 == SrcTraits::rank ? R6 : R7 )))))))) }; // Subview's layout typedef typename std::conditional< @@ -2679,15 +2613,15 @@ private: || // OutputRank 1 or 2, InputLayout Left, Interval 0 // because single stride one or second index has a stride. - ( rank <= 2 && R0 && std::is_same< typename Traits::array_layout , Kokkos::LayoutLeft >::value ) + ( rank <= 2 && R0 && std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutLeft >::value ) || // OutputRank 1 or 2, InputLayout Right, Interval [InputRank-1] // because single stride one or second index has a stride. 
- ( rank <= 2 && R0_rev && std::is_same< typename Traits::array_layout , Kokkos::LayoutRight >::value ) - ), typename Traits::array_layout , Kokkos::LayoutStride + ( rank <= 2 && R0_rev && std::is_same< typename SrcTraits::array_layout , Kokkos::LayoutRight >::value ) + ), typename SrcTraits::array_layout , Kokkos::LayoutStride >::type array_layout ; - typedef typename Traits::value_type value_type ; + typedef typename SrcTraits::value_type value_type ; typedef typename std::conditional< rank == 0 , value_type , typename std::conditional< rank == 1 , value_type * , @@ -2703,66 +2637,66 @@ private: public: - typedef - Kokkos::Experimental::ViewTraits< data_type , array_layout - , typename Traits::device_type - , typename Traits::memory_traits > traits_type ; + typedef Kokkos::Experimental::ViewTraits + < data_type + , array_layout + , typename SrcTraits::device_type + , typename SrcTraits::memory_traits > traits_type ; - typedef Kokkos::Experimental::View< data_type - , array_layout - , typename Traits::device_type - , typename Traits::memory_traits > type ; + typedef Kokkos::Experimental::View + < data_type + , array_layout + , typename SrcTraits::device_type + , typename SrcTraits::memory_traits > type ; - template< class T0 , class T1 , class T2 , class T3 - , class T4 , class T5 , class T6 , class T7 > + template< class MemoryTraits > + struct apply { + + static_assert( Kokkos::Impl::is_memory_traits< MemoryTraits >::value , "" ); + + typedef Kokkos::Experimental::ViewTraits + < data_type + , array_layout + , typename SrcTraits::device_type + , MemoryTraits > traits_type ; + + typedef Kokkos::Experimental::View + < data_type + , array_layout + , typename SrcTraits::device_type + , MemoryTraits > type ; + }; + + // The presumed type is 'ViewMapping< traits_type , void >' + // However, a compatible ViewMapping is acceptable. 
+ template< class DstTraits > KOKKOS_INLINE_FUNCTION - static void assign( ViewMapping< traits_type , void , void > & dst - , ViewMapping< Traits , void , void > const & src - , T0 const & arg0 - , T1 const & arg1 - , T2 const & arg2 - , T3 const & arg3 - , T4 const & arg4 - , T5 const & arg5 - , T6 const & arg6 - , T7 const & arg7 - ) + static void assign( ViewMapping< DstTraits , void > & dst + , ViewMapping< SrcTraits , void > const & src + , Args ... args ) { - typedef ViewMapping< traits_type , void , void > DstType ; + static_assert( + ViewMapping< DstTraits , traits_type , void >::is_assignable , + "Subview destination type must be compatible with subview derived type" ); + + typedef ViewMapping< DstTraits , void > DstType ; typedef typename DstType::offset_type dst_offset_type ; typedef typename DstType::handle_type dst_handle_type ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V0 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V1 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V2 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V3 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V4 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V5 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V6 ; - typedef Kokkos::Experimental::Impl::ViewOffsetRange V7 ; - - dst.m_offset = dst_offset_type - ( src.m_offset - , V0::dimension( src.m_offset.dimension_0() , arg0 ) - , V1::dimension( src.m_offset.dimension_1() , arg1 ) - , V2::dimension( src.m_offset.dimension_2() , arg2 ) - , V3::dimension( src.m_offset.dimension_3() , arg3 ) - , V4::dimension( src.m_offset.dimension_4() , arg4 ) - , V5::dimension( src.m_offset.dimension_5() , arg5 ) - , V6::dimension( src.m_offset.dimension_6() , arg6 ) - , V7::dimension( src.m_offset.dimension_7() , arg7 ) - ); + const SubviewExtents< SrcTraits::rank , rank > + extents( src.m_offset.m_dim , args... 
); + dst.m_offset = dst_offset_type( src.m_offset , extents ); dst.m_handle = dst_handle_type( src.m_handle + - src.m_offset( V0::begin( arg0 ) - , V1::begin( arg1 ) - , V2::begin( arg2 ) - , V3::begin( arg3 ) - , V4::begin( arg4 ) - , V5::begin( arg5 ) - , V6::begin( arg6 ) - , V7::begin( arg7 ) + src.m_offset( extents.domain_offset(0) + , extents.domain_offset(1) + , extents.domain_offset(2) + , extents.domain_offset(3) + , extents.domain_offset(4) + , extents.domain_offset(5) + , extents.domain_offset(6) + , extents.domain_offset(7) ) ); } }; @@ -2776,53 +2710,12 @@ namespace Kokkos { namespace Experimental { namespace Impl { -template< class V - , bool R0 = false , bool R1 = false , bool R2 = false , bool R3 = false - , bool R4 = false , bool R5 = false , bool R6 = false , bool R7 = false > -struct SubviewType ; - -template< class D , class A1, class A2, class A3 - , bool R0 , bool R1 , bool R2 , bool R3 - , bool R4 , bool R5 , bool R6 , bool R7 > -struct SubviewType< Kokkos::Experimental::View< D , A1, A2, A3 > , R0 , R1 , R2 , R3 , R4 , R5 , R6 , R7 > -{ -private: - typedef Kokkos::Experimental::ViewTraits< D , A1 , A2 , A3 > traits ; - typedef Kokkos::Experimental::Impl::SubviewMapping< traits , R0 , R1 , R2 , R3 , R4 , R5 , R6 , R7 > mapping ; -public: - typedef typename mapping::type type ; -}; - -}}} // namespace Kokkos::Experimental::Impl - -//---------------------------------------------------------------------------- -//---------------------------------------------------------------------------- - -namespace Kokkos { -namespace Experimental { -namespace Impl { - class Error_view_scalar_reference_to_non_scalar_view ; } /* namespace Impl */ } /* namespace Experimental */ } /* namespace Kokkos */ -#if defined( KOKKOS_EXPRESSION_CHECK ) - -#define KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( SPACE , MAP , RANK , I0 , I1 , I2 , I3 , I4 , I5 , I6 , I7 ) \ - Kokkos::Impl::VerifyExecutionCanAccessMemorySpace< \ - Kokkos::Impl::ActiveExecutionMemorySpace , SPACE 
>::verify( MAP.data() ); \ - /* array bounds checking */ - -#else - -#define KOKKOS_ASSERT_VIEW_MAPPING_ACCESS( SPACE , MAP , RANK , I0 , I1 , I2 , I3 , I4 , I5 , I6 , I7 ) \ - Kokkos::Impl::VerifyExecutionCanAccessMemorySpace< \ - Kokkos::Impl::ActiveExecutionMemorySpace , SPACE >::verify( MAP.data() ) - -#endif - //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- diff --git a/lib/kokkos/core/src/impl/KokkosExp_ViewTile.hpp b/lib/kokkos/core/src/impl/KokkosExp_ViewTile.hpp index a661a35347..32bd7bac91 100644 --- a/lib/kokkos/core/src/impl/KokkosExp_ViewTile.hpp +++ b/lib/kokkos/core/src/impl/KokkosExp_ViewTile.hpp @@ -69,8 +69,8 @@ struct ViewOffset< Dimension , Layout , { public: - enum { SHIFT_0 = Kokkos::Impl::power_of_two::value }; - enum { SHIFT_1 = Kokkos::Impl::power_of_two::value }; + enum { SHIFT_0 = Kokkos::Impl::integral_power_of_two(Layout::N0) }; + enum { SHIFT_1 = Kokkos::Impl::integral_power_of_two(Layout::N1) }; enum { SHIFT_T = SHIFT_0 + SHIFT_1 }; enum { MASK_0 = Layout::N0 - 1 }; enum { MASK_1 = Layout::N1 - 1 }; @@ -155,6 +155,42 @@ public: {} }; +template< typename T , unsigned N0 , unsigned N1 , class ... P + , typename iType0 , typename iType1 + > +struct ViewMapping + < void + , Kokkos::Experimental::ViewTraits,P...> + , Kokkos::LayoutTileLeft + , iType0 + , iType1 > +{ + typedef Kokkos::LayoutTileLeft src_layout ; + typedef Kokkos::Experimental::ViewTraits< T** , src_layout , P... > src_traits ; + typedef Kokkos::Experimental::ViewTraits< T[N0][N1] , LayoutLeft , P ... > traits ; + typedef Kokkos::Experimental::View< T[N0][N1] , LayoutLeft , P ... 
> type ; + + KOKKOS_INLINE_FUNCTION static + void assign( ViewMapping< traits , void > & dst + , const ViewMapping< src_traits , void > & src + , const src_layout & + , const size_t i_tile0 + , const size_t i_tile1 + ) + { + typedef ViewMapping< traits , void > dst_map_type ; + typedef ViewMapping< src_traits , void > src_map_type ; + typedef typename dst_map_type::handle_type dst_handle_type ; + typedef typename dst_map_type::offset_type dst_offset_type ; + typedef typename src_map_type::offset_type src_offset_type ; + + dst = dst_map_type( + dst_handle_type( src.m_handle + + ( ( i_tile0 + src.m_offset.m_tile_N0 * i_tile1 ) << src_offset_type::SHIFT_T ) ) , + dst_offset_type() ); + } +}; + } /* namespace Impl */ } /* namespace Experimental */ } /* namespace Kokkos */ @@ -162,51 +198,20 @@ public: namespace Kokkos { namespace Experimental { -// Using View with an invalid data type to construct the tiling subview. -// View is a friend of View so we use this invalid data type partial specialization -// to access implementation of both source and destination view for constructing -// the tile subview. 
- -template< unsigned N0 , unsigned N1 > -struct View< void , Kokkos::LayoutTileLeft , void , void > -{ - typedef Kokkos::LayoutTileLeft Layout ; - - template< typename T , class A2 , class A3 > - KOKKOS_INLINE_FUNCTION static - Kokkos::Experimental::View< T[N0][N1] , LayoutLeft , A2 , A3 > - tile_subview( const Kokkos::Experimental::View & src - , const size_t i_tile0 - , const size_t i_tile1 - ) - { - typedef Kokkos::Experimental::View SrcView ; - typedef Kokkos::Experimental::View< T[N0][N1] , LayoutLeft , A2 , A3 > DstView ; - - typedef typename SrcView::map_type::offset_type src_offset_type ; - typedef typename DstView::map_type dst_map_type ; - typedef typename DstView::map_type::handle_type dst_handle_type ; - typedef typename DstView::map_type::offset_type dst_offset_type ; - - return DstView( src.m_track , - dst_map_type( - dst_handle_type( src.m_map.m_handle + - ( ( i_tile0 + src.m_map.m_offset.m_tile_N0 * i_tile1 ) << src_offset_type::SHIFT_T ) ) , - dst_offset_type() ) - ); - } -}; - -template< typename T , unsigned N0 , unsigned N1 , class A2 , class A3 > +template< typename T , unsigned N0 , unsigned N1 , class ... P > KOKKOS_INLINE_FUNCTION -Kokkos::Experimental::View< T[N0][N1] , LayoutLeft , A2 , A3 > -tile_subview( const Kokkos::Experimental::View,A2,A3> & src +Kokkos::Experimental::View< T[N0][N1] , LayoutLeft , P... > +tile_subview( const Kokkos::Experimental::View,P...> & src , const size_t i_tile0 , const size_t i_tile1 ) { - return View< void , Kokkos::LayoutTileLeft , void , void >:: - tile_subview( src , i_tile0 , i_tile1 ); + // Force the specialized ViewMapping for extracting a tile + // by using the first subview argument as the layout. + typedef Kokkos::LayoutTileLeft SrcLayout ; + + return Kokkos::Experimental::View< T[N0][N1] , LayoutLeft , P... 
> + ( src , SrcLayout() , i_tile0 , i_tile1 ); } } /* namespace Experimental */ diff --git a/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.cpp b/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.cpp index 7fb33853d6..efd2a096ad 100644 --- a/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.cpp @@ -43,6 +43,8 @@ #include +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + #if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) #include @@ -842,3 +844,5 @@ void * create_singleton( size_t size #endif /* #if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) */ +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + diff --git a/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.hpp b/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.hpp index 331c4e8fac..8912d73bab 100644 --- a/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_AllocationTracker.hpp @@ -46,6 +46,8 @@ #include +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + #include #include @@ -351,7 +353,6 @@ public: //----------------------------------------------------------------------------- // forward declaration for friend classes -struct CopyWithoutTracking; struct MallocHelper; /// class AllocationTracker @@ -544,6 +545,10 @@ public: /// NOT thread-safe void reallocate( size_t size ) const; + static void disable_tracking(); + static void enable_tracking(); + static bool tracking_enabled(); + private: static AllocationTracker find( void const * ptr, AllocatorBase const * arg_allocator ); @@ -556,31 +561,14 @@ private: void increment_ref_count() const; void decrement_ref_count() const; - static void disable_tracking(); - static void enable_tracking(); - static bool tracking_enabled(); - - friend struct Impl::CopyWithoutTracking; friend struct Impl::MallocHelper; uintptr_t m_alloc_rec; }; - - -/// Make a copy of the functor with reference counting disabled -struct CopyWithoutTracking -{ - template - static Functor apply( const Functor & f ) - { - AllocationTracker::disable_tracking(); - Functor func(f); - AllocationTracker::enable_tracking(); - return func; - } -}; - }} // namespace Kokkos::Impl +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + #endif //KOKKOS_ALLOCATION_TRACKER_HPP + diff --git a/lib/kokkos/core/src/impl/Kokkos_Atomic_View.hpp b/lib/kokkos/core/src/impl/Kokkos_Atomic_View.hpp index f95ed67da9..b1d47e19fb 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Atomic_View.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_Atomic_View.hpp @@ -427,6 +427,8 @@ struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars<8> { typedef int64_t type; }; +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + // Must be non-const, atomic access trait, and 32 or 64 bit type for true atomics. template class ViewDataHandle< @@ -457,6 +459,8 @@ public: } }; +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + }} // namespace Kokkos::Impl #endif diff --git a/lib/kokkos/core/src/impl/Kokkos_Atomic_Windows.hpp b/lib/kokkos/core/src/impl/Kokkos_Atomic_Windows.hpp index 62581569fb..14066e8be2 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Atomic_Windows.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_Atomic_Windows.hpp @@ -45,6 +45,7 @@ #ifdef _WIN32 #define NOMINMAX +#include #include namespace Kokkos { @@ -61,7 +62,6 @@ namespace Kokkos { }; } -#ifdef KOKKOS_HAVE_CXX11 template < typename T > KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T * const dest, const T & compare, @@ -103,10 +103,18 @@ namespace Kokkos { KOKKOS_INLINE_FUNCTION U() {}; } tmp, newval; newval.t = val; - tmp.i = _InterlockedCompareExchange128((LONGLONG*)dest, newval.i.upper, newval.i.lower, *((LONGLONG*)&compare)); + _InterlockedCompareExchange128((LONGLONG*)dest, newval.i.upper, newval.i.lower, ((LONGLONG*)&compare)); + tmp.t = dest; return tmp.t; } + template < typename T > + KOKKOS_INLINE_FUNCTION + T atomic_compare_exchange_strong(volatile T * const dest, const T & compare, const T & val) + { + return atomic_compare_exchange(dest,compare,val); + } + template< typename T > T atomic_fetch_or(volatile T * const dest, const T val) { T oldval = *dest; @@ -147,7 +155,20 @@ namespace Kokkos { } template< typename T > - T atomic_fetch_exchange(volatile T * const dest, const T val) { + T atomic_fetch_sub(volatile T * const dest, const T val) { + T oldval = *dest; + T assume; + do { + assume = oldval; + T newval = val - oldval; + oldval = atomic_compare_exchange(dest, assume, newval); + } while (assume != oldval); + + return oldval; + } + + template< typename T > + T atomic_exchange(volatile T * const dest, const T val) { T oldval = *dest; T assume; do { @@ -174,8 +195,8 @@ namespace Kokkos { } template< typename T > - void atomic_exchange(volatile T * const dest, const T val) { - atomic_fetch_exchange(dest, val); + void atomic_sub(volatile T * const dest, 
const T val) { + atomic_fetch_sub(dest, val); } template< typename T > @@ -208,4 +229,4 @@ namespace Kokkos { } #endif #endif -#endif \ No newline at end of file + diff --git a/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.cpp b/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.cpp index 08085dca36..6562ea700c 100644 --- a/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.cpp @@ -43,6 +43,8 @@ #include +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + #include #include @@ -50,8 +52,11 @@ #include // uintptr_t #include // for malloc, realloc, and free #include // for memcpy + +#if defined(KOKKOS_POSIX_MEMALIGN_AVAILABLE) #include // for mmap, munmap, MAP_ANON, etc #include // for sysconf, _SC_PAGE_SIZE, _SC_PHYS_PAGES +#endif #include @@ -103,8 +108,7 @@ void * raw_aligned_allocate( size_t size, size_t alignment ) #if defined( __INTEL_COMPILER ) && !defined ( KOKKOS_HAVE_CUDA ) ptr = _mm_malloc( size , alignment ); -#elif ( defined( _POSIX_C_SOURCE ) && _POSIX_C_SOURCE >= 200112L ) || \ - ( defined( _XOPEN_SOURCE ) && _XOPEN_SOURCE >= 600 ) +#elif defined(KOKKOS_POSIX_MEMALIGN_AVAILABLE) posix_memalign( & ptr, alignment , size ); @@ -136,8 +140,7 @@ void raw_aligned_deallocate( void * ptr, size_t /*size*/ ) #if defined( __INTEL_COMPILER ) && !defined ( KOKKOS_HAVE_CUDA ) _mm_free( ptr ); -#elif ( defined( _POSIX_C_SOURCE ) && _POSIX_C_SOURCE >= 200112L ) || \ - ( defined( _XOPEN_SOURCE ) && _XOPEN_SOURCE >= 600 ) +#elif defined(KOKKOS_POSIX_MEMALIGN_AVAILABLE) free( ptr ); #else // get the alloc'd pointer @@ -279,3 +282,6 @@ void * PageAlignedAllocator::reallocate(void * old_ptr, size_t old_size, size_t } }} // namespace Kokkos::Impl + +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + diff --git a/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.hpp b/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.hpp index 76377c5f15..43a150fb4a 100644 --- a/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_BasicAllocators.hpp @@ -44,6 +44,7 @@ #ifndef KOKKOS_BASIC_ALLOCATORS_HPP #define KOKKOS_BASIC_ALLOCATORS_HPP +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) namespace Kokkos { namespace Impl { @@ -113,6 +114,8 @@ public: }} // namespace Kokkos::Impl +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + #endif //KOKKOS_BASIC_ALLOCATORS_HPP diff --git a/lib/kokkos/core/src/impl/Kokkos_Core.cpp b/lib/kokkos/core/src/impl/Kokkos_Core.cpp index 1c3c83cfe7..bb0ce3f834 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Core.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_Core.cpp @@ -67,6 +67,13 @@ bool is_unsigned_int(const char* str) void initialize_internal(const InitArguments& args) { +// This is an experimental setting +// For KNL in Flat mode this variable should be set, so that +// memkind allocates high bandwidth memory correctly. +#ifdef KOKKOS_HAVE_HBWSPACE +setenv("MEMKIND_HBW_NODES", "1", 0); +#endif + // Protect declarations, to prevent "unused variable" warnings. 
#if defined( KOKKOS_HAVE_OPENMP ) || defined( KOKKOS_HAVE_PTHREAD ) const int num_threads = args.num_threads; diff --git a/lib/kokkos/core/src/impl/Kokkos_Error.cpp b/lib/kokkos/core/src/impl/Kokkos_Error.cpp index 97cfbfae7e..36224990d0 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Error.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_Error.cpp @@ -61,7 +61,7 @@ void host_abort( const char * const message ) { fwrite(message,1,strlen(message),stderr); fflush(stderr); - abort(); + ::abort(); } void throw_runtime_exception( const std::string & msg ) diff --git a/lib/kokkos/core/src/impl/Kokkos_Error.hpp b/lib/kokkos/core/src/impl/Kokkos_Error.hpp index 33e203c948..5f88d66206 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Error.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_Error.hpp @@ -46,6 +46,10 @@ #include #include +#include +#ifdef KOKKOS_HAVE_CUDA +#include +#endif namespace Kokkos { namespace Impl { diff --git a/lib/kokkos/core/src/impl/Kokkos_HBWAllocators.cpp b/lib/kokkos/core/src/impl/Kokkos_HBWAllocators.cpp new file mode 100644 index 0000000000..4eb80d03f1 --- /dev/null +++ b/lib/kokkos/core/src/impl/Kokkos_HBWAllocators.cpp @@ -0,0 +1,108 @@ +/* +//@HEADER +// ************************************************************************ +// +// Kokkos v. 2.0 +// Copyright (2014) Sandia Corporation +// +// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +// the U.S. Government retains certain rights in this software. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the Corporation nor the names of the +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Questions? Contact H. 
Carter Edwards (hcedwar@sandia.gov) +// +// ************************************************************************ +//@HEADER +*/ + +#include + +#include +#include + + +#include // uintptr_t +#include // for malloc, realloc, and free +#include // for memcpy + +#if defined(KOKKOS_POSIX_MEMALIGN_AVAILABLE) +#include // for mmap, munmap, MAP_ANON, etc +#include // for sysconf, _SC_PAGE_SIZE, _SC_PHYS_PAGES +#endif + +#include +#include + +#ifdef KOKKOS_HAVE_HBWSPACE +#include + +namespace Kokkos { +namespace Experimental { +namespace Impl { +#define MEMKIND_TYPE MEMKIND_HBW //hbw_get_kind(HBW_PAGESIZE_4KB) +/*--------------------------------------------------------------------------*/ + +void* HBWMallocAllocator::allocate( size_t size ) +{ + std::cout<< "Allocate HBW: " << 1.0e-6*size << "MB" << std::endl; + void * ptr = NULL; + if (size) { + ptr = memkind_malloc(MEMKIND_TYPE,size); + + if (!ptr) + { + std::ostringstream msg ; + msg << name() << ": allocate(" << size << ") FAILED"; + Kokkos::Impl::throw_runtime_exception( msg.str() ); + } + } + return ptr; +} + +void HBWMallocAllocator::deallocate( void * ptr, size_t /*size*/ ) +{ + if (ptr) { + memkind_free(MEMKIND_TYPE,ptr); + } +} + +void * HBWMallocAllocator::reallocate(void * old_ptr, size_t /*old_size*/, size_t new_size) +{ + void * ptr = memkind_realloc(MEMKIND_TYPE, old_ptr, new_size); + + if (new_size > 0u && ptr == NULL) { + Kokkos::Impl::throw_runtime_exception("Error: Malloc Allocator could not reallocate memory"); + } + return ptr; +} + +} // namespace Impl +} // namespace Experimental +} // namespace Kokkos +#endif diff --git a/lib/kokkos/core/src/impl/Kokkos_HBWAllocators.hpp b/lib/kokkos/core/src/impl/Kokkos_HBWAllocators.hpp new file mode 100644 index 0000000000..be0134460b --- /dev/null +++ b/lib/kokkos/core/src/impl/Kokkos_HBWAllocators.hpp @@ -0,0 +1,75 @@ +/* +//@HEADER +// ************************************************************************ +// +// Kokkos v. 
2.0 +// Copyright (2014) Sandia Corporation +// +// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +// the U.S. Government retains certain rights in this software. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the Corporation nor the names of the +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Questions? Contact H. 
Carter Edwards (hcedwar@sandia.gov) +// +// ************************************************************************ +//@HEADER +*/ + +#ifndef KOKKOS_HBW_ALLOCATORS_HPP +#define KOKKOS_HBW_ALLOCATORS_HPP + +#ifdef KOKKOS_HAVE_HBWSPACE + +namespace Kokkos { +namespace Experimental { +namespace Impl { + +/// class MallocAllocator +class HBWMallocAllocator +{ +public: + static const char * name() + { + return "HBW Malloc Allocator"; + } + + static void* allocate(size_t size); + + static void deallocate(void * ptr, size_t size); + + static void * reallocate(void * old_ptr, size_t old_size, size_t new_size); +}; + +} +} +} // namespace Kokkos::Impl +#endif //KOKKOS_HAVE_HBWSPACE +#endif //KOKKOS_HBW_ALLOCATORS_HPP + + diff --git a/lib/kokkos/core/src/impl/Kokkos_HBWSpace.cpp b/lib/kokkos/core/src/impl/Kokkos_HBWSpace.cpp new file mode 100644 index 0000000000..68e424e859 --- /dev/null +++ b/lib/kokkos/core/src/impl/Kokkos_HBWSpace.cpp @@ -0,0 +1,397 @@ +/* +//@HEADER +// ************************************************************************ +// +// Kokkos v. 2.0 +// Copyright (2014) Sandia Corporation +// +// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +// the U.S. Government retains certain rights in this software. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the Corporation nor the names of the +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) +// +// ************************************************************************ +//@HEADER +*/ + + +#include + + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef KOKKOS_HAVE_HBWSPACE +#include +#endif + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- +#ifdef KOKKOS_HAVE_HBWSPACE +#define MEMKIND_TYPE MEMKIND_HBW //hbw_get_kind(HBW_PAGESIZE_4KB) + +namespace Kokkos { +namespace Experimental { +namespace { + +static const int QUERY_SPACE_IN_PARALLEL_MAX = 16 ; + +typedef int (* QuerySpaceInParallelPtr )(); + +QuerySpaceInParallelPtr s_in_parallel_query[ QUERY_SPACE_IN_PARALLEL_MAX ] ; +int s_in_parallel_query_count = 0 ; + +} // namespace + +void HBWSpace::register_in_parallel( int (*device_in_parallel)() ) +{ + if ( 0 == device_in_parallel ) { + Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::HBWSpace::register_in_parallel ERROR : given NULL" ) ); + } + + int i = -1 ; + + if ( ! 
(device_in_parallel)() ) { + for ( i = 0 ; i < s_in_parallel_query_count && ! (*(s_in_parallel_query[i]))() ; ++i ); + } + + if ( i < s_in_parallel_query_count ) { + Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::HBWSpace::register_in_parallel_query ERROR : called in_parallel" ) ); + + } + + if ( QUERY_SPACE_IN_PARALLEL_MAX <= i ) { + Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::HBWSpace::register_in_parallel_query ERROR : exceeded maximum" ) ); + + } + + for ( i = 0 ; i < s_in_parallel_query_count && s_in_parallel_query[i] != device_in_parallel ; ++i ); + + if ( i == s_in_parallel_query_count ) { + s_in_parallel_query[s_in_parallel_query_count++] = device_in_parallel ; + } +} + +int HBWSpace::in_parallel() +{ + const int n = s_in_parallel_query_count ; + + int i = 0 ; + + while ( i < n && ! (*(s_in_parallel_query[i]))() ) { ++i ; } + + return i < n ; +} + +} // namespace Experiemtal +} // namespace Kokkos + +/*--------------------------------------------------------------------------*/ + +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + +namespace Kokkos { +namespace Experimental { + +Kokkos::Impl::AllocationTracker HBWSpace::allocate_and_track( const std::string & label, const size_t size ) +{ + return Kokkos::Impl::AllocationTracker( allocator(), size, label ); +} + +} // namespace Experimental +} // namespace Kokkos + +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + +/*--------------------------------------------------------------------------*/ + +namespace Kokkos { +namespace Experimental { + +/* Default allocation mechanism */ +HBWSpace::HBWSpace() + : m_alloc_mech( + HBWSpace::STD_MALLOC + ) +{ +printf("Init\n"); +setenv("MEMKIND_HBW_NODES", "1", 0); +} + +/* Default allocation mechanism */ +HBWSpace::HBWSpace( const HBWSpace::AllocationMechanism & arg_alloc_mech ) + : m_alloc_mech( HBWSpace::STD_MALLOC ) +{ +printf("Init2\n"); +setenv("MEMKIND_HBW_NODES", "1", 0); + if ( arg_alloc_mech == STD_MALLOC ) { + m_alloc_mech = HBWSpace::STD_MALLOC ; + } +} + +void * HBWSpace::allocate( const size_t arg_alloc_size ) const +{ + static_assert( sizeof(void*) == sizeof(uintptr_t) + , "Error sizeof(void*) != sizeof(uintptr_t)" ); + + static_assert( Kokkos::Impl::power_of_two< Kokkos::Impl::MEMORY_ALIGNMENT >::value + , "Memory alignment must be power of two" ); + + constexpr uintptr_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT ; + constexpr uintptr_t alignment_mask = alignment - 1 ; + + void * ptr = 0 ; + + if ( arg_alloc_size ) { + + if ( m_alloc_mech == STD_MALLOC ) { + // Over-allocate to and round up to guarantee proper alignment. + size_t size_padded = arg_alloc_size + sizeof(void*) + alignment ; + + void * alloc_ptr = memkind_malloc(MEMKIND_TYPE, size_padded ); + + if (alloc_ptr) { + uintptr_t address = reinterpret_cast(alloc_ptr); + + // offset enough to record the alloc_ptr + address += sizeof(void *); + uintptr_t rem = address % alignment; + uintptr_t offset = rem ? 
(alignment - rem) : 0u; + address += offset; + ptr = reinterpret_cast(address); + // record the alloc'd pointer + address -= sizeof(void *); + *reinterpret_cast(address) = alloc_ptr; + } + } + } + + if ( ( ptr == 0 ) || ( reinterpret_cast(ptr) == ~uintptr_t(0) ) + || ( reinterpret_cast(ptr) & alignment_mask ) ) { + std::ostringstream msg ; + msg << "Kokkos::Experimental::HBWSpace::allocate[ " ; + switch( m_alloc_mech ) { + case STD_MALLOC: msg << "STD_MALLOC" ; break ; + } + msg << " ]( " << arg_alloc_size << " ) FAILED" ; + if ( ptr == NULL ) { msg << " NULL" ; } + else { msg << " NOT ALIGNED " << ptr ; } + + std::cerr << msg.str() << std::endl ; + std::cerr.flush(); + + Kokkos::Impl::throw_runtime_exception( msg.str() ); + } + + return ptr; +} + + +void HBWSpace::deallocate( void * const arg_alloc_ptr , const size_t arg_alloc_size ) const +{ + if ( arg_alloc_ptr ) { + + if ( m_alloc_mech == STD_MALLOC ) { + void * alloc_ptr = *(reinterpret_cast(arg_alloc_ptr) -1); + memkind_free(MEMKIND_TYPE, alloc_ptr ); + } + + } +} + +} // namespace Experimental +} // namespace Kokkos + +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + +namespace Kokkos { +namespace Experimental { +namespace Impl { + +SharedAllocationRecord< void , void > +SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >::s_root_record ; + +void +SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +deallocate( SharedAllocationRecord< void , void > * arg_rec ) +{ + delete static_cast(arg_rec); +} + +SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +~SharedAllocationRecord() +{ + m_space.deallocate( SharedAllocationRecord< void , void >::m_alloc_ptr + , SharedAllocationRecord< void , void >::m_alloc_size + ); +} + +SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +SharedAllocationRecord( const Kokkos::Experimental::HBWSpace & arg_space + , 
const std::string & arg_label + , const size_t arg_alloc_size + , const SharedAllocationRecord< void , void >::function_type arg_dealloc + ) + // Pass through allocated [ SharedAllocationHeader , user_memory ] + // Pass through deallocation function + : SharedAllocationRecord< void , void > + ( & SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >::s_root_record + , reinterpret_cast( arg_space.allocate( sizeof(SharedAllocationHeader) + arg_alloc_size ) ) + , sizeof(SharedAllocationHeader) + arg_alloc_size + , arg_dealloc + ) + , m_space( arg_space ) +{ + // Fill in the Header information + RecordBase::m_alloc_ptr->m_record = static_cast< SharedAllocationRecord< void , void > * >( this ); + + strncpy( RecordBase::m_alloc_ptr->m_label + , arg_label.c_str() + , SharedAllocationHeader::maximum_label_length + ); +} + +//---------------------------------------------------------------------------- + +void * SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +allocate_tracked( const Kokkos::Experimental::HBWSpace & arg_space + , const std::string & arg_alloc_label + , const size_t arg_alloc_size ) +{ + if ( ! 
arg_alloc_size ) return (void *) 0 ; + + SharedAllocationRecord * const r = + allocate( arg_space , arg_alloc_label , arg_alloc_size ); + + RecordBase::increment( r ); + + return r->data(); +} + +void SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +deallocate_tracked( void * const arg_alloc_ptr ) +{ + if ( arg_alloc_ptr != 0 ) { + SharedAllocationRecord * const r = get_record( arg_alloc_ptr ); + + RecordBase::decrement( r ); + } +} + +void * SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ) +{ + SharedAllocationRecord * const r_old = get_record( arg_alloc_ptr ); + SharedAllocationRecord * const r_new = allocate( r_old->m_space , r_old->get_label() , arg_alloc_size ); + + Kokkos::Impl::DeepCopy( r_new->data() , r_old->data() + , std::min( r_old->size() , r_new->size() ) ); + + RecordBase::increment( r_new ); + RecordBase::decrement( r_old ); + + return r_new->data(); +} + +SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void > * +SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >::get_record( void * alloc_ptr ) +{ + typedef SharedAllocationHeader Header ; + typedef SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void > RecordHost ; + + SharedAllocationHeader const * const head = alloc_ptr ? Header::get_header( alloc_ptr ) : (SharedAllocationHeader *)0 ; + RecordHost * const record = head ? static_cast< RecordHost * >( head->m_record ) : (RecordHost *) 0 ; + + if ( ! alloc_ptr || record->m_alloc_ptr != head ) { + Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >::get_record ERROR" ) ); + } + + return record ; +} + +// Iterate records to print orphaned memory ... 
+void SharedAllocationRecord< Kokkos::Experimental::HBWSpace , void >:: +print_records( std::ostream & s , const Kokkos::Experimental::HBWSpace & space , bool detail ) +{ + SharedAllocationRecord< void , void >::print_host_accessible_records( s , "HBWSpace" , & s_root_record , detail ); +} + +} // namespace Impl +} // namespace Experimental +} // namespace Kokkos + +/*--------------------------------------------------------------------------*/ +/*--------------------------------------------------------------------------*/ + +namespace Kokkos { +namespace Experimental { +namespace { + const unsigned HBW_SPACE_ATOMIC_MASK = 0xFFFF; + const unsigned HBW_SPACE_ATOMIC_XOR_MASK = 0x5A39; + static int HBW_SPACE_ATOMIC_LOCKS[HBW_SPACE_ATOMIC_MASK+1]; +} + +namespace Impl { +void init_lock_array_hbw_space() { + static int is_initialized = 0; + if(! is_initialized) + for(int i = 0; i < static_cast (HBW_SPACE_ATOMIC_MASK+1); i++) + HBW_SPACE_ATOMIC_LOCKS[i] = 0; +} + +bool lock_address_hbw_space(void* ptr) { + return 0 == atomic_compare_exchange( &HBW_SPACE_ATOMIC_LOCKS[ + (( size_t(ptr) >> 2 ) & HBW_SPACE_ATOMIC_MASK) ^ HBW_SPACE_ATOMIC_XOR_MASK] , + 0 , 1); +} + +void unlock_address_hbw_space(void* ptr) { + atomic_exchange( &HBW_SPACE_ATOMIC_LOCKS[ + (( size_t(ptr) >> 2 ) & HBW_SPACE_ATOMIC_MASK) ^ HBW_SPACE_ATOMIC_XOR_MASK] , + 0); +} + +} +} +} +#endif diff --git a/lib/kokkos/core/src/impl/Kokkos_HostSpace.cpp b/lib/kokkos/core/src/impl/Kokkos_HostSpace.cpp index 69b8ce86d0..851a39a3e4 100644 --- a/lib/kokkos/core/src/impl/Kokkos_HostSpace.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_HostSpace.cpp @@ -41,7 +41,7 @@ //@HEADER */ - +#include #include /*--------------------------------------------------------------------------*/ @@ -56,10 +56,7 @@ /*--------------------------------------------------------------------------*/ -#if ( defined( _POSIX_C_SOURCE ) && _POSIX_C_SOURCE >= 200112L ) || \ - ( defined( _XOPEN_SOURCE ) && _XOPEN_SOURCE >= 600 ) - -#define 
KOKKOS_POSIX_MEMALIGN_AVAILABLE +#if defined(KOKKOS_POSIX_MEMALIGN_AVAILABLE) #include #include @@ -73,8 +70,9 @@ #endif // mmap flags for huge page tables +// the Cuda driver does not interoperate with MAP_HUGETLB #if defined( KOKKOS_POSIX_MMAP_FLAGS ) - #if defined( MAP_HUGETLB ) + #if defined( MAP_HUGETLB ) && ! defined( KOKKOS_HAVE_CUDA ) #define KOKKOS_POSIX_MMAP_FLAGS_HUGE (KOKKOS_POSIX_MMAP_FLAGS | MAP_HUGETLB ) #else #define KOKKOS_POSIX_MMAP_FLAGS_HUGE KOKKOS_POSIX_MMAP_FLAGS @@ -158,6 +156,8 @@ int HostSpace::in_parallel() /*--------------------------------------------------------------------------*/ +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + namespace Kokkos { Impl::AllocationTracker HostSpace::allocate_and_track( const std::string & label, const size_t size ) @@ -167,6 +167,8 @@ Impl::AllocationTracker HostSpace::allocate_and_track( const std::string & label } // namespace Kokkos +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + /*--------------------------------------------------------------------------*/ namespace Kokkos { @@ -225,13 +227,13 @@ void * HostSpace::allocate( const size_t arg_alloc_size ) const static_assert( sizeof(void*) == sizeof(uintptr_t) , "Error sizeof(void*) != sizeof(uintptr_t)" ); - static_assert( Kokkos::Impl::power_of_two< Kokkos::Impl::MEMORY_ALIGNMENT >::value + static_assert( Kokkos::Impl::is_integral_power_of_two( Kokkos::Impl::MEMORY_ALIGNMENT ) , "Memory alignment must be power of two" ); - constexpr size_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT ; - constexpr size_t alignment_mask = alignment - 1 ; + constexpr uintptr_t alignment = Kokkos::Impl::MEMORY_ALIGNMENT ; + constexpr uintptr_t alignment_mask = alignment - 1 ; - void * ptr = NULL; + void * ptr = 0 ; if ( arg_alloc_size ) { @@ -272,9 +274,9 @@ void * HostSpace::allocate( const size_t arg_alloc_size ) const else if ( m_alloc_mech == POSIX_MMAP ) { constexpr size_t use_huge_pages = (1u << 27); constexpr int prot = PROT_READ | PROT_WRITE ; - 
const int flags = arg_alloc_size < use_huge_pages - ? KOKKOS_POSIX_MMAP_FLAGS - : KOKKOS_POSIX_MMAP_FLAGS_HUGE ; + const int flags = arg_alloc_size < use_huge_pages + ? KOKKOS_POSIX_MMAP_FLAGS + : KOKKOS_POSIX_MMAP_FLAGS_HUGE ; // read write access to private memory @@ -282,8 +284,8 @@ void * HostSpace::allocate( const size_t arg_alloc_size ) const , arg_alloc_size /* size in bytes */ , prot /* memory protection */ , flags /* visibility of updates */ - , -1 /* file descriptor */ - , 0 /* offset */ + , -1 /* file descriptor */ + , 0 /* offset */ ); /* Associated reallocation: @@ -293,8 +295,24 @@ void * HostSpace::allocate( const size_t arg_alloc_size ) const #endif } - if ( reinterpret_cast(ptr) & alignment_mask ) { - Kokkos::Impl::throw_runtime_exception( "Kokkos::HostSpace aligned allocation failed" ); + if ( ( ptr == 0 ) || ( reinterpret_cast(ptr) == ~uintptr_t(0) ) + || ( reinterpret_cast(ptr) & alignment_mask ) ) { + std::ostringstream msg ; + msg << "Kokkos::HostSpace::allocate[ " ; + switch( m_alloc_mech ) { + case STD_MALLOC: msg << "STD_MALLOC" ; break ; + case POSIX_MEMALIGN: msg << "POSIX_MEMALIGN" ; break ; + case POSIX_MMAP: msg << "POSIX_MMAP" ; break ; + case INTEL_MM_ALLOC: msg << "INTEL_MM_ALLOC" ; break ; + } + msg << " ]( " << arg_alloc_size << " ) FAILED" ; + if ( ptr == NULL ) { msg << " NULL" ; } + else { msg << " NOT ALIGNED " << ptr ; } + + std::cerr << msg.str() << std::endl ; + std::cerr.flush(); + + Kokkos::Impl::throw_runtime_exception( msg.str() ); } return ptr; @@ -333,6 +351,9 @@ void HostSpace::deallocate( void * const arg_alloc_ptr , const size_t arg_alloc_ } // namespace Kokkos +//---------------------------------------------------------------------------- +//---------------------------------------------------------------------------- + namespace Kokkos { namespace Experimental { namespace Impl { @@ -380,16 +401,59 @@ SharedAllocationRecord( const Kokkos::HostSpace & arg_space ); } 
+//---------------------------------------------------------------------------- + +void * SharedAllocationRecord< Kokkos::HostSpace , void >:: +allocate_tracked( const Kokkos::HostSpace & arg_space + , const std::string & arg_alloc_label + , const size_t arg_alloc_size ) +{ + if ( ! arg_alloc_size ) return (void *) 0 ; + + SharedAllocationRecord * const r = + allocate( arg_space , arg_alloc_label , arg_alloc_size ); + + RecordBase::increment( r ); + + return r->data(); +} + +void SharedAllocationRecord< Kokkos::HostSpace , void >:: +deallocate_tracked( void * const arg_alloc_ptr ) +{ + if ( arg_alloc_ptr != 0 ) { + SharedAllocationRecord * const r = get_record( arg_alloc_ptr ); + + RecordBase::decrement( r ); + } +} + +void * SharedAllocationRecord< Kokkos::HostSpace , void >:: +reallocate_tracked( void * const arg_alloc_ptr + , const size_t arg_alloc_size ) +{ + SharedAllocationRecord * const r_old = get_record( arg_alloc_ptr ); + SharedAllocationRecord * const r_new = allocate( r_old->m_space , r_old->get_label() , arg_alloc_size ); + + Kokkos::Impl::DeepCopy( r_new->data() , r_old->data() + , std::min( r_old->size() , r_new->size() ) ); + + RecordBase::increment( r_new ); + RecordBase::decrement( r_old ); + + return r_new->data(); +} + SharedAllocationRecord< Kokkos::HostSpace , void > * SharedAllocationRecord< Kokkos::HostSpace , void >::get_record( void * alloc_ptr ) { typedef SharedAllocationHeader Header ; typedef SharedAllocationRecord< Kokkos::HostSpace , void > RecordHost ; - SharedAllocationHeader const * const head = Header::get_header( alloc_ptr ); - RecordHost * const record = static_cast< RecordHost * >( head->m_record ); + SharedAllocationHeader const * const head = alloc_ptr ? Header::get_header( alloc_ptr ) : (SharedAllocationHeader *)0 ; + RecordHost * const record = head ? static_cast< RecordHost * >( head->m_record ) : (RecordHost *) 0 ; - if ( record->m_alloc_ptr != head ) { + if ( ! 
alloc_ptr || record->m_alloc_ptr != head ) { Kokkos::Impl::throw_runtime_exception( std::string("Kokkos::Experimental::Impl::SharedAllocationRecord< Kokkos::HostSpace , void >::get_record ERROR" ) ); } @@ -410,6 +474,54 @@ print_records( std::ostream & s , const Kokkos::HostSpace & space , bool detail /*--------------------------------------------------------------------------*/ /*--------------------------------------------------------------------------*/ +namespace Kokkos { +namespace Experimental { +namespace Impl { + +template< class > +struct ViewOperatorBoundsErrorAbort ; + +template<> +struct ViewOperatorBoundsErrorAbort< Kokkos::HostSpace > { + static void apply( const size_t rank + , const size_t n0 , const size_t n1 + , const size_t n2 , const size_t n3 + , const size_t n4 , const size_t n5 + , const size_t n6 , const size_t n7 + , const size_t i0 , const size_t i1 + , const size_t i2 , const size_t i3 + , const size_t i4 , const size_t i5 + , const size_t i6 , const size_t i7 ); +}; + +void ViewOperatorBoundsErrorAbort< Kokkos::HostSpace >:: +apply( const size_t rank + , const size_t n0 , const size_t n1 + , const size_t n2 , const size_t n3 + , const size_t n4 , const size_t n5 + , const size_t n6 , const size_t n7 + , const size_t i0 , const size_t i1 + , const size_t i2 , const size_t i3 + , const size_t i4 , const size_t i5 + , const size_t i6 , const size_t i7 ) +{ + char buffer[512]; + + snprintf( buffer , sizeof(buffer) + , "View operator bounds error : rank(%lu) dim(%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu) index(%lu,%lu,%lu,%lu,%lu,%lu,%lu,%lu)" + , rank , n0 , n1 , n2 , n3 , n4 , n5 , n6 , n7 + , i0 , i1 , i2 , i3 , i4 , i5 , i6 , i7 ); + + Kokkos::Impl::throw_runtime_exception( buffer ); +} + +} // namespace Impl +} // namespace Experimental +} // namespace Kokkos + +/*--------------------------------------------------------------------------*/ +/*--------------------------------------------------------------------------*/ + namespace Kokkos { 
namespace { const unsigned HOST_SPACE_ATOMIC_MASK = 0xFFFF; diff --git a/lib/kokkos/core/src/impl/Kokkos_Profiling_Interface.cpp b/lib/kokkos/core/src/impl/Kokkos_Profiling_Interface.cpp index a88be37dde..50e45166bd 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Profiling_Interface.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_Profiling_Interface.cpp @@ -138,9 +138,21 @@ namespace Kokkos { }; void finalize() { - if(NULL != finalizeProfileLibrary) { - (*finalizeProfileLibrary)(); - } + if(NULL != finalizeProfileLibrary) { + (*finalizeProfileLibrary)(); + + // Set all profile hooks to NULL to prevent + // any additional calls. Once we are told to + // finalize, we mean it + beginForCallee = NULL; + beginScanCallee = NULL; + beginReduceCallee = NULL; + endScanCallee = NULL; + endForCallee = NULL; + endReduceCallee = NULL; + initProfileLibrary = NULL; + finalizeProfileLibrary = NULL; + } }; } } diff --git a/lib/kokkos/core/src/impl/Kokkos_Serial_TaskPolicy.cpp b/lib/kokkos/core/src/impl/Kokkos_Serial_TaskPolicy.cpp index 688f97f42e..5f3e65b327 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Serial_TaskPolicy.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_Serial_TaskPolicy.cpp @@ -230,7 +230,7 @@ void Task::assign( Task ** const lhs , Task * rhs , const bool no_throw ) namespace { Task * s_ready = 0 ; -Task * s_denied = reinterpret_cast( ~((unsigned long)0) ); +Task * s_denied = reinterpret_cast( ~((uintptr_t)0) ); } diff --git a/lib/kokkos/core/src/impl/Kokkos_Shape.hpp b/lib/kokkos/core/src/impl/Kokkos_Shape.hpp index dba7301270..9749e0a1ff 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Shape.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_Shape.hpp @@ -272,14 +272,14 @@ void assert_shape_bounds( const ShapeType & shape , // Must supply at least as many indices as ranks. // Every index must be within bounds. 
const bool ok = ShapeType::rank <= arg_rank && - i0 < shape.N0 && - i1 < shape.N1 && - i2 < shape.N2 && - i3 < shape.N3 && - i4 < shape.N4 && - i5 < shape.N5 && - i6 < shape.N6 && - i7 < shape.N7 ; + i0 < size_t(shape.N0) && + i1 < size_t(shape.N1) && + i2 < size_t(shape.N2) && + i3 < size_t(shape.N3) && + i4 < size_t(shape.N4) && + i5 < size_t(shape.N5) && + i6 < size_t(shape.N6) && + i7 < size_t(shape.N7) ; if ( ! ok ) { AssertShapeBoundsAbort< Kokkos::Impl::ActiveExecutionMemorySpace > diff --git a/lib/kokkos/core/src/impl/Kokkos_Synchronic.hpp b/lib/kokkos/core/src/impl/Kokkos_Synchronic.hpp new file mode 100644 index 0000000000..b2aea14df4 --- /dev/null +++ b/lib/kokkos/core/src/impl/Kokkos_Synchronic.hpp @@ -0,0 +1,693 @@ +/* + +Copyright (c) 2014, NVIDIA Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef KOKKOS_SYNCHRONIC_HPP +#define KOKKOS_SYNCHRONIC_HPP + +#include + +#include +#include +#include +#include +#include + +namespace Kokkos { +namespace Impl { + +enum notify_hint { + notify_all, + notify_one, + notify_none +}; +enum expect_hint { + expect_urgent, + expect_delay +}; + +namespace Details { + +template +bool __synchronic_spin_wait_for_update(S const& arg, T const& nval, int attempts) noexcept { + int i = 0; + for(;i < __SYNCHRONIC_SPIN_RELAX(attempts); ++i) + if(__builtin_expect(arg.load(std::memory_order_relaxed) != nval,1)) + return true; + else + __synchronic_relax(); + for(;i < attempts; ++i) + if(__builtin_expect(arg.load(std::memory_order_relaxed) != nval,1)) + return true; + else + __synchronic_yield(); + return false; +} + +struct __exponential_backoff { + __exponential_backoff(int arg_maximum=512) : maximum(arg_maximum), microseconds(8), x(123456789), y(362436069), z(521288629) { + } + static inline void sleep_for(std::chrono::microseconds const& time) { + auto t = time.count(); + if(__builtin_expect(t > 75,0)) { + portable_sleep(time); + } + else if(__builtin_expect(t > 25,0)) + __synchronic_yield(); + else + __synchronic_relax(); + } + void sleep_for_step() { + sleep_for(step()); + } + std::chrono::microseconds step() { + float const f = ranfu(); + int const t = int(microseconds * f); + if(__builtin_expect(f >= 0.95f,0)) + microseconds = 8; + else + microseconds = (std::min)(microseconds>>1,maximum); + return 
std::chrono::microseconds(t); + } +private : + int maximum, microseconds, x, y, z; + int xorshf96() { + int t; + x ^= x << 16; x ^= x >> 5; x ^= x << 1; + t = x; x = y; y = z; z = t ^ x ^ y; + return z; + } + float ranfu() { + return (float)(xorshf96()&(~0UL>>1)) / (float)(~0UL>>1); + } +}; + +template +struct __synchronic_base { + +protected: + std::atomic atom; + + void notify(notify_hint = notify_all) noexcept { + } + void notify(notify_hint = notify_all) volatile noexcept { + } + +public : + __synchronic_base() noexcept = default; + constexpr __synchronic_base(T v) noexcept : atom(v) { } + __synchronic_base(const __synchronic_base&) = delete; + ~__synchronic_base() { } + __synchronic_base& operator=(const __synchronic_base&) = delete; + __synchronic_base& operator=(const __synchronic_base&) volatile = delete; + + void expect_update(T val, expect_hint = expect_urgent) const noexcept { + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_A)) + return; + __exponential_backoff b; + while(atom.load(std::memory_order_relaxed) == val) { + __do_backoff(b); + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_B)) + return; + } + } + void expect_update(T val, expect_hint = expect_urgent) const volatile noexcept { + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_A)) + return; + __exponential_backoff b; + while(atom.load(std::memory_order_relaxed) == val) { + __do_backoff(b); + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_B)) + return; + } + } + + template + void expect_update_until(T val, std::chrono::time_point const& then, expect_hint = expect_urgent) const { + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_A)) + return; + __exponential_backoff b; + std::chrono::milliseconds remains = then - std::chrono::high_resolution_clock::now(); + while(remains > std::chrono::milliseconds::zero() && atom.load(std::memory_order_relaxed) == val) { + __do_backoff(b); 
+ if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_B)) + return; + remains = then - std::chrono::high_resolution_clock::now(); + } + } + template + void expect_update_until(T val, std::chrono::time_point const& then, expect_hint = expect_urgent) const volatile { + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_A)) + return; + __exponential_backoff b; + std::chrono::milliseconds remains = then - std::chrono::high_resolution_clock::now(); + while(remains > std::chrono::milliseconds::zero() && atom.load(std::memory_order_relaxed) == val) { + __do_backoff(b); + if(__synchronic_spin_wait_for_update(atom, val, __SYNCHRONIC_SPIN_COUNT_B)) + return; + remains = then - std::chrono::high_resolution_clock::now(); + } + } +}; + +#ifdef __SYNCHRONIC_COMPATIBLE +template +struct __synchronic_base::type> { + +public: + std::atomic atom; + + void notify(notify_hint hint = notify_all) noexcept { + if(__builtin_expect(hint == notify_none,1)) + return; + auto const x = count.fetch_add(0,std::memory_order_acq_rel); + if(__builtin_expect(x,0)) { + if(__builtin_expect(hint == notify_all,1)) + __synchronic_wake_all(&atom); + else + __synchronic_wake_one(&atom); + } + } + void notify(notify_hint hint = notify_all) volatile noexcept { + if(__builtin_expect(hint == notify_none,1)) + return; + auto const x = count.fetch_add(0,std::memory_order_acq_rel); + if(__builtin_expect(x,0)) { + if(__builtin_expect(hint == notify_all,1)) + __synchronic_wake_all_volatile(&atom); + else + __synchronic_wake_one_volatile(&atom); + } + } + +public : + __synchronic_base() noexcept : count(0) { } + constexpr __synchronic_base(T v) noexcept : atom(v), count(0) { } + __synchronic_base(const __synchronic_base&) = delete; + ~__synchronic_base() { } + __synchronic_base& operator=(const __synchronic_base&) = delete; + __synchronic_base& operator=(const __synchronic_base&) volatile = delete; + + void expect_update(T val, expect_hint = expect_urgent) const noexcept { + 
if(__builtin_expect(__synchronic_spin_wait_for_update(atom, val,__SYNCHRONIC_SPIN_COUNT_A),1)) + return; + while(__builtin_expect(atom.load(std::memory_order_relaxed) == val,1)) { + count.fetch_add(1,std::memory_order_release); + __synchronic_wait(&atom,val); + count.fetch_add(-1,std::memory_order_acquire); + } + } + void expect_update(T val, expect_hint = expect_urgent) const volatile noexcept { + if(__builtin_expect(__synchronic_spin_wait_for_update(atom, val,__SYNCHRONIC_SPIN_COUNT_A),1)) + return; + while(__builtin_expect(atom.load(std::memory_order_relaxed) == val,1)) { + count.fetch_add(1,std::memory_order_release); + __synchronic_wait_volatile(&atom,val); + count.fetch_add(-1,std::memory_order_acquire); + } + } + + template + void expect_update_until(T val, std::chrono::time_point const& then, expect_hint = expect_urgent) const { + if(__builtin_expect(__synchronic_spin_wait_for_update(atom, val,__SYNCHRONIC_SPIN_COUNT_A),1)) + return; + std::chrono::milliseconds remains = then - std::chrono::high_resolution_clock::now(); + while(__builtin_expect(remains > std::chrono::milliseconds::zero() && atom.load(std::memory_order_relaxed) == val,1)) { + count.fetch_add(1,std::memory_order_release); + __synchronic_wait_timed(&atom,val,remains); + count.fetch_add(-1,std::memory_order_acquire); + remains = then - std::chrono::high_resolution_clock::now(); + } + } + template + void expect_update_until(T val, std::chrono::time_point const& then, expect_hint = expect_urgent) const volatile { + if(__builtin_expect(__synchronic_spin_wait_for_update(atom, val,__SYNCHRONIC_SPIN_COUNT_A),1)) + return; + std::chrono::milliseconds remains = then - std::chrono::high_resolution_clock::now(); + while(__builtin_expect(remains > std::chrono::milliseconds::zero() && atom.load(std::memory_order_relaxed) == val,1)) { + count.fetch_add(1,std::memory_order_release); + __synchronic_wait_timed_volatile(&atom,val,remains); + count.fetch_add(-1,std::memory_order_acquire); + remains = then - 
std::chrono::high_resolution_clock::now(); + } + } +private: + mutable std::atomic count; +}; +#endif + +template +struct __synchronic : public __synchronic_base { + + __synchronic() noexcept = default; + constexpr __synchronic(T v) noexcept : __synchronic_base(v) { } + __synchronic(const __synchronic&) = delete; + __synchronic& operator=(const __synchronic&) = delete; + __synchronic& operator=(const __synchronic&) volatile = delete; +}; + +template +struct __synchronic::value>::type> : public __synchronic_base { + + T fetch_add(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_add(v,m); + this->notify(n); + return t; + } + T fetch_add(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.fetch_add(v,m); + this->notify(n); + return t; + } + T fetch_sub(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_sub(v,m); + this->notify(n); + return t; + } + T fetch_sub(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.fetch_sub(v,m); + this->notify(n); + return t; + } + T fetch_and(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_and(v,m); + this->notify(n); + return t; + } + T fetch_and(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.fetch_and(v,m); + this->notify(n); + return t; + } + T fetch_or(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_or(v,m); + this->notify(n); + return t; + } + T fetch_or(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = 
this->atom.fetch_or(v,m); + this->notify(n); + return t; + } + T fetch_xor(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_xor(v,m); + this->notify(n); + return t; + } + T fetch_xor(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.fetch_xor(v,m); + this->notify(n); + return t; + } + + __synchronic() noexcept = default; + constexpr __synchronic(T v) noexcept : __synchronic_base(v) { } + __synchronic(const __synchronic&) = delete; + __synchronic& operator=(const __synchronic&) = delete; + __synchronic& operator=(const __synchronic&) volatile = delete; + + T operator=(T v) volatile noexcept { + auto const t = this->atom = v; + this->notify(); + return t; + } + T operator=(T v) noexcept { + auto const t = this->atom = v; + this->notify(); + return t; + } + T operator++(int) volatile noexcept { + auto const t = ++this->atom; + this->notify(); + return t; + } + T operator++(int) noexcept { + auto const t = ++this->atom; + this->notify(); + return t; + } + T operator--(int) volatile noexcept { + auto const t = --this->atom; + this->notify(); + return t; + } + T operator--(int) noexcept { + auto const t = --this->atom; + this->notify(); + return t; + } + T operator++() volatile noexcept { + auto const t = this->atom++; + this->notify(); + return t; + } + T operator++() noexcept { + auto const t = this->atom++; + this->notify(); + return t; + } + T operator--() volatile noexcept { + auto const t = this->atom--; + this->notify(); + return t; + } + T operator--() noexcept { + auto const t = this->atom--; + this->notify(); + return t; + } + T operator+=(T v) volatile noexcept { + auto const t = this->atom += v; + this->notify(); + return t; + } + T operator+=(T v) noexcept { + auto const t = this->atom += v; + this->notify(); + return t; + } + T operator-=(T v) volatile noexcept { + auto const t = this->atom -= v; + 
this->notify(); + return t; + } + T operator-=(T v) noexcept { + auto const t = this->atom -= v; + this->notify(); + return t; + } + T operator&=(T v) volatile noexcept { + auto const t = this->atom &= v; + this->notify(); + return t; + } + T operator&=(T v) noexcept { + auto const t = this->atom &= v; + this->notify(); + return t; + } + T operator|=(T v) volatile noexcept { + auto const t = this->atom |= v; + this->notify(); + return t; + } + T operator|=(T v) noexcept { + auto const t = this->atom |= v; + this->notify(); + return t; + } + T operator^=(T v) volatile noexcept { + auto const t = this->atom ^= v; + this->notify(); + return t; + } + T operator^=(T v) noexcept { + auto const t = this->atom ^= v; + this->notify(); + return t; + } +}; + +template +struct __synchronic : public __synchronic_base { + + T* fetch_add(ptrdiff_t v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_add(v,m); + this->notify(n); + return t; + } + T* fetch_add(ptrdiff_t v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.fetch_add(v,m); + this->notify(n); + return t; + } + T* fetch_sub(ptrdiff_t v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.fetch_sub(v,m); + this->notify(n); + return t; + } + T* fetch_sub(ptrdiff_t v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.fetch_sub(v,m); + this->notify(n); + return t; + } + + __synchronic() noexcept = default; + constexpr __synchronic(T* v) noexcept : __synchronic_base(v) { } + __synchronic(const __synchronic&) = delete; + __synchronic& operator=(const __synchronic&) = delete; + __synchronic& operator=(const __synchronic&) volatile = delete; + + T* operator=(T* v) volatile noexcept { + auto const t = this->atom = v; + this->notify(); + return t; 
+ } + T* operator=(T* v) noexcept { + auto const t = this->atom = v; + this->notify(); + return t; + } + T* operator++(int) volatile noexcept { + auto const t = ++this->atom; + this->notify(); + return t; + } + T* operator++(int) noexcept { + auto const t = ++this->atom; + this->notify(); + return t; + } + T* operator--(int) volatile noexcept { + auto const t = --this->atom; + this->notify(); + return t; + } + T* operator--(int) noexcept { + auto const t = --this->atom; + this->notify(); + return t; + } + T* operator++() volatile noexcept { + auto const t = this->atom++; + this->notify(); + return t; + } + T* operator++() noexcept { + auto const t = this->atom++; + this->notify(); + return t; + } + T* operator--() volatile noexcept { + auto const t = this->atom--; + this->notify(); + return t; + } + T* operator--() noexcept { + auto const t = this->atom--; + this->notify(); + return t; + } + T* operator+=(ptrdiff_t v) volatile noexcept { + auto const t = this->atom += v; + this->notify(); + return t; + } + T* operator+=(ptrdiff_t v) noexcept { + auto const t = this->atom += v; + this->notify(); + return t; + } + T* operator-=(ptrdiff_t v) volatile noexcept { + auto const t = this->atom -= v; + this->notify(); + return t; + } + T* operator-=(ptrdiff_t v) noexcept { + auto const t = this->atom -= v; + this->notify(); + return t; + } +}; + +} //namespace Details + +template +struct synchronic : public Details::__synchronic { + + bool is_lock_free() const volatile noexcept { return this->atom.is_lock_free(); } + bool is_lock_free() const noexcept { return this->atom.is_lock_free(); } + void store(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + this->atom.store(v,m); + this->notify(n); + } + void store(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + this->atom.store(v,m); + this->notify(n); + } + T load(std::memory_order m = std::memory_order_seq_cst) const volatile 
noexcept { return this->atom.load(m); } + T load(std::memory_order m = std::memory_order_seq_cst) const noexcept { return this->atom.load(m); } + + operator T() const volatile noexcept { return (T)this->atom; } + operator T() const noexcept { return (T)this->atom; } + + T exchange(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.exchange(v,m); + this->notify(n); + return t; + } + T exchange(T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.exchange(v,m); + this->notify(n); + return t; + } + bool compare_exchange_weak(T& r, T v, std::memory_order m1, std::memory_order m2, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.compare_exchange_weak(r,v,m1,m2); + this->notify(n); + return t; + } + bool compare_exchange_weak(T& r, T v, std::memory_order m1, std::memory_order m2, notify_hint n = notify_all) noexcept { + auto const t = this->atom.compare_exchange_weak(r,v,m1, m2); + this->notify(n); + return t; + } + bool compare_exchange_strong(T& r, T v, std::memory_order m1, std::memory_order m2, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.compare_exchange_strong(r,v,m1,m2); + this->notify(n); + return t; + } + bool compare_exchange_strong(T& r, T v, std::memory_order m1, std::memory_order m2, notify_hint n = notify_all) noexcept { + auto const t = this->atom.compare_exchange_strong(r,v,m1,m2); + this->notify(n); + return t; + } + bool compare_exchange_weak(T& r, T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.compare_exchange_weak(r,v,m); + this->notify(n); + return t; + } + bool compare_exchange_weak(T& r, T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.compare_exchange_weak(r,v,m); + this->notify(n); + return t; 
+ } + bool compare_exchange_strong(T& r, T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) volatile noexcept { + auto const t = this->atom.compare_exchange_strong(r,v,m); + this->notify(n); + return t; + } + bool compare_exchange_strong(T& r, T v, std::memory_order m = std::memory_order_seq_cst, notify_hint n = notify_all) noexcept { + auto const t = this->atom.compare_exchange_strong(r,v,m); + this->notify(n); + return t; + } + + synchronic() noexcept = default; + constexpr synchronic(T val) noexcept : Details::__synchronic(val) { } + synchronic(const synchronic&) = delete; + ~synchronic() { } + synchronic& operator=(const synchronic&) = delete; + synchronic& operator=(const synchronic&) volatile = delete; + T operator=(T val) noexcept { + return Details::__synchronic::operator=(val); + } + T operator=(T val) volatile noexcept { + return Details::__synchronic::operator=(val); + } + + T load_when_not_equal(T val, std::memory_order order = std::memory_order_seq_cst, expect_hint h = expect_urgent) const noexcept { + Details::__synchronic::expect_update(val,h); + return load(order); + } + T load_when_not_equal(T val, std::memory_order order = std::memory_order_seq_cst, expect_hint h = expect_urgent) const volatile noexcept { + Details::__synchronic::expect_update(val,h); + return load(order); + } + T load_when_equal(T val, std::memory_order order = std::memory_order_seq_cst, expect_hint h = expect_urgent) const noexcept { + for(T nval = load(std::memory_order_relaxed); nval != val; nval = load(std::memory_order_relaxed)) + Details::__synchronic::expect_update(nval,h); + return load(order); + } + T load_when_equal(T val, std::memory_order order = std::memory_order_seq_cst, expect_hint h = expect_urgent) const volatile noexcept { + for(T nval = load(std::memory_order_relaxed); nval != val; nval = load(std::memory_order_relaxed)) + expect_update(nval,h); + return load(order); + } + template + void expect_update_for(T val, 
std::chrono::duration const& delta, expect_hint h = expect_urgent) const { + Details::__synchronic::expect_update_until(val, std::chrono::high_resolution_clock::now() + delta,h); + } + template < class Rep, class Period> + void expect_update_for(T val, std::chrono::duration const& delta, expect_hint h = expect_urgent) const volatile { + Details::__synchronic::expect_update_until(val, std::chrono::high_resolution_clock::now() + delta,h); + } +}; + +#include + +typedef synchronic synchronic_char; +typedef synchronic synchronic_schar; +typedef synchronic synchronic_uchar; +typedef synchronic synchronic_short; +typedef synchronic synchronic_ushort; +typedef synchronic synchronic_int; +typedef synchronic synchronic_uint; +typedef synchronic synchronic_long; +typedef synchronic synchronic_ulong; +typedef synchronic synchronic_llong; +typedef synchronic synchronic_ullong; +//typedef synchronic synchronic_char16_t; +//typedef synchronic synchronic_char32_t; +typedef synchronic synchronic_wchar_t; + +typedef synchronic synchronic_int_least8_t; +typedef synchronic synchronic_uint_least8_t; +typedef synchronic synchronic_int_least16_t; +typedef synchronic synchronic_uint_least16_t; +typedef synchronic synchronic_int_least32_t; +typedef synchronic synchronic_uint_least32_t; +//typedef synchronic synchronic_int_least_64_t; +typedef synchronic synchronic_uint_least64_t; +typedef synchronic synchronic_int_fast8_t; +typedef synchronic synchronic_uint_fast8_t; +typedef synchronic synchronic_int_fast16_t; +typedef synchronic synchronic_uint_fast16_t; +typedef synchronic synchronic_int_fast32_t; +typedef synchronic synchronic_uint_fast32_t; +typedef synchronic synchronic_int_fast64_t; +typedef synchronic synchronic_uint_fast64_t; +typedef synchronic synchronic_intptr_t; +typedef synchronic synchronic_uintptr_t; +typedef synchronic synchronic_size_t; +typedef synchronic synchronic_ptrdiff_t; +typedef synchronic synchronic_intmax_t; +typedef synchronic synchronic_uintmax_t; + +} +} + 
+#endif //__SYNCHRONIC_H diff --git a/lib/kokkos/core/src/impl/Kokkos_Synchronic_Config.hpp b/lib/kokkos/core/src/impl/Kokkos_Synchronic_Config.hpp new file mode 100644 index 0000000000..0a6dd6e715 --- /dev/null +++ b/lib/kokkos/core/src/impl/Kokkos_Synchronic_Config.hpp @@ -0,0 +1,169 @@ +/* + +Copyright (c) 2014, NVIDIA Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#ifndef KOKKOS_SYNCHRONIC_CONFIG_H +#define KOKKOS_SYNCHRONIC_CONFIG_H + +#include +#include + +namespace Kokkos { +namespace Impl { + +//the default yield function used inside the implementation is the Standard one +#define __synchronic_yield std::this_thread::yield +#define __synchronic_relax __synchronic_yield + +#if defined(_MSC_VER) + //this is a handy GCC optimization that I use inside the implementation + #define __builtin_expect(condition,common) condition + #if _MSC_VER <= 1800 + //using certain keywords that VC++ temporarily doesn't support + #define _ALLOW_KEYWORD_MACROS + #define noexcept + #define constexpr + #endif + //yes, I define multiple assignment operators + #pragma warning(disable:4522) + //I don't understand how Windows is so bad at timing functions, but is OK + //with straight-up yield loops + #define __do_backoff(b) __synchronic_yield() +#else +#define __do_backoff(b) b.sleep_for_step() +#endif + +//certain platforms have efficient support for spin-waiting built into the operating system +#if defined(__linux__) || (defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0602) +#if defined(_WIN32_WINNT) +#include +#include + //the combination of WaitOnAddress and WakeByAddressAll is supported on Windows 8.1+ + #define __synchronic_wait(x,v) WaitOnAddress((PVOID)x,(PVOID)&v,sizeof(v),-1) + #define __synchronic_wait_timed(x,v,t) WaitOnAddress((PVOID)x,(PVOID)&v,sizeof(v),std::chrono::duration_cast(t).count()) + #define __synchronic_wake_one(x) WakeByAddressSingle((PVOID)x) + #define __synchronic_wake_all(x) WakeByAddressAll((PVOID)x) + #define __synchronic_wait_volatile(x,v) WaitOnAddress((PVOID)x,(PVOID)&v,sizeof(v),-1) + #define __synchronic_wait_timed_volatile(x,v,t) WaitOnAddress((PVOID)x,(PVOID)&v,sizeof(v),std::chrono::duration_cast(t).count()) + #define __synchronic_wake_one_volatile(x) WakeByAddressSingle((PVOID)x) + #define __synchronic_wake_all_volatile(x) WakeByAddressAll((PVOID)x) + #define __SYNCHRONIC_COMPATIBLE(x) (std::is_pod::value 
&& (sizeof(x) <= 8)) + + inline void native_sleep(unsigned long microseconds) + { + // What to do if microseconds is < 1000? + Sleep(microseconds / 1000); + } + + inline void native_yield() + { + SwitchToThread(); + } +#elif defined(__linux__) + #include + #include + #include + #include + #include + #include + #include + #include + template < class Rep, class Period> + inline timespec to_timespec(std::chrono::duration const& delta) { + struct timespec ts; + ts.tv_sec = static_cast(std::chrono::duration_cast(delta).count()); + assert(!ts.tv_sec); + ts.tv_nsec = static_cast(std::chrono::duration_cast(delta).count()); + return ts; + } + inline long futex(void const* addr1, int op, int val1) { + return syscall(SYS_futex, addr1, op, val1, 0, 0, 0); + } + inline long futex(void const* addr1, int op, int val1, struct timespec timeout) { + return syscall(SYS_futex, addr1, op, val1, &timeout, 0, 0); + } + inline void native_sleep(unsigned long microseconds) + { + usleep(microseconds); + } + inline void native_yield() + { + pthread_yield(); + } + + //the combination of SYS_futex(WAIT) and SYS_futex(WAKE) is supported on all recent Linux distributions + #define __synchronic_wait(x,v) futex(x, FUTEX_WAIT_PRIVATE, v) + #define __synchronic_wait_timed(x,v,t) futex(x, FUTEX_WAIT_PRIVATE, v, to_timespec(t)) + #define __synchronic_wake_one(x) futex(x, FUTEX_WAKE_PRIVATE, 1) + #define __synchronic_wake_all(x) futex(x, FUTEX_WAKE_PRIVATE, INT_MAX) + #define __synchronic_wait_volatile(x,v) futex(x, FUTEX_WAIT, v) + #define __synchronic_wait_volatile_timed(x,v,t) futex(x, FUTEX_WAIT, v, to_timespec(t)) + #define __synchronic_wake_one_volatile(x) futex(x, FUTEX_WAKE, 1) + #define __synchronic_wake_all_volatile(x) futex(x, FUTEX_WAKE, INT_MAX) + #define __SYNCHRONIC_COMPATIBLE(x) (std::is_integral::value && (sizeof(x) <= 4)) + + //the yield function on Linux is better replaced by sched_yield, which is tuned for spin-waiting + #undef __synchronic_yield + #define __synchronic_yield 
sched_yield + + //for extremely short wait times, just let another hyper-thread run + #undef __synchronic_relax + #define __synchronic_relax() asm volatile("rep; nop" ::: "memory") + +#endif +#endif + +#ifdef _GLIBCXX_USE_NANOSLEEP +inline void portable_sleep(std::chrono::microseconds const& time) +{ std::this_thread::sleep_for(time); } +#else +inline void portable_sleep(std::chrono::microseconds const& time) +{ native_sleep(time.count()); } +#endif + +#ifdef _GLIBCXX_USE_SCHED_YIELD +inline void portable_yield() +{ std::this_thread::yield(); } +#else +inline void portable_yield() +{ native_yield(); } +#endif + +//this is the number of times we initially spin, on the first wait attempt +#define __SYNCHRONIC_SPIN_COUNT_A 16 + +//this is how decide to yield instead of just spinning, 'c' is the current trip count +//#define __SYNCHRONIC_SPIN_YIELD(c) true +#define __SYNCHRONIC_SPIN_RELAX(c) (c>>3) + +//this is the number of times we normally spin, on every subsequent wait attempt +#define __SYNCHRONIC_SPIN_COUNT_B 8 + +} +} + +#endif //__SYNCHRONIC_CONFIG_H diff --git a/lib/kokkos/core/src/impl/Kokkos_Synchronic_n3998.hpp b/lib/kokkos/core/src/impl/Kokkos_Synchronic_n3998.hpp new file mode 100644 index 0000000000..facc8d6d8e --- /dev/null +++ b/lib/kokkos/core/src/impl/Kokkos_Synchronic_n3998.hpp @@ -0,0 +1,162 @@ +/* + +Copyright (c) 2014, NVIDIA Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef KOKKOS_SYNCHRONIC_N3998_HPP +#define KOKKOS_SYNCHRONIC_N3998_HPP + +#include +#include + +/* +In the section below, a synchronization point represents a point at which a +thread may block until a given synchronization condition has been reached or +at which it may notify other threads that a synchronization condition has +been achieved. +*/ +namespace Kokkos { namespace Impl { + + /* + A latch maintains an internal counter that is initialized when the latch + is created. The synchronization condition is reached when the counter is + decremented to 0. Threads may block at a synchronization point waiting + for the condition to be reached. When the condition is reached, any such + blocked threads will be released. 
+ */ + struct latch { + latch(int val) : count(val), released(false) { } + latch(const latch&) = delete; + latch& operator=(const latch&) = delete; + ~latch( ) { } + void arrive( ) { + __arrive( ); + } + void arrive_and_wait( ) { + if(!__arrive( )) + wait( ); + } + void wait( ) { + while(!released.load_when_not_equal(false,std::memory_order_acquire)) + ; + } + bool try_wait( ) { + return released.load(std::memory_order_acquire); + } + private: + bool __arrive( ) { + if(count.fetch_add(-1,std::memory_order_release)!=1) + return false; + released.store(true,std::memory_order_release); + return true; + } + std::atomic count; + synchronic released; + }; + + /* + A barrier is created with an initial value representing the number of threads + that can arrive at the synchronization point. When that many threads have + arrived, the synchronization condition is reached and the threads are + released. The barrier will then reset, and may be reused for a new cycle, in + which the same set of threads may arrive again at the synchronization point. + The same set of threads shall arrive at the barrier in each cycle, otherwise + the behaviour is undefined. 
+ */ + struct barrier { + barrier(int val) : expected(val), arrived(0), nexpected(val), epoch(0) { } + barrier(const barrier&) = delete; + barrier& operator=(const barrier&) = delete; + ~barrier() { } + void arrive_and_wait() { + int const myepoch = epoch.load(std::memory_order_relaxed); + if(!__arrive(myepoch)) + while(epoch.load_when_not_equal(myepoch,std::memory_order_acquire) == myepoch) + ; + } + void arrive_and_drop() { + nexpected.fetch_add(-1,std::memory_order_relaxed); + __arrive(epoch.load(std::memory_order_relaxed)); + } + private: + bool __arrive(int const myepoch) { + int const myresult = arrived.fetch_add(1,std::memory_order_acq_rel) + 1; + if(__builtin_expect(myresult == expected,0)) { + expected = nexpected.load(std::memory_order_relaxed); + arrived.store(0,std::memory_order_relaxed); + epoch.store(myepoch+1,std::memory_order_release); + return true; + } + return false; + } + int expected; + std::atomic arrived, nexpected; + synchronic epoch; + }; + + /* + A notifying barrier behaves as a barrier, but is constructed with a callable + completion function that is invoked after all threads have arrived at the + synchronization point, and before the synchronization condition is reached. + The completion may modify the set of threads that arrives at the barrier in + each cycle. 
+ */ + struct notifying_barrier { + template + notifying_barrier(int val, T && f) : expected(val), arrived(0), nexpected(val), epoch(0), completion(std::forward(f)) { } + notifying_barrier(const notifying_barrier&) = delete; + notifying_barrier& operator=(const notifying_barrier&) = delete; + ~notifying_barrier( ) { } + void arrive_and_wait() { + int const myepoch = epoch.load(std::memory_order_relaxed); + if(!__arrive(myepoch)) + while(epoch.load_when_not_equal(myepoch,std::memory_order_acquire) == myepoch) + ; + } + void arrive_and_drop() { + nexpected.fetch_add(-1,std::memory_order_relaxed); + __arrive(epoch.load(std::memory_order_relaxed)); + } + private: + bool __arrive(int const myepoch) { + int const myresult = arrived.fetch_add(1,std::memory_order_acq_rel) + 1; + if(__builtin_expect(myresult == expected,0)) { + int const newexpected = completion(); + expected = newexpected ? newexpected : nexpected.load(std::memory_order_relaxed); + arrived.store(0,std::memory_order_relaxed); + epoch.store(myepoch+1,std::memory_order_release); + return true; + } + return false; + } + int expected; + std::atomic arrived, nexpected; + synchronic epoch; + std::function completion; + }; +}} + +#endif //__N3998_H diff --git a/lib/kokkos/core/src/impl/Kokkos_Tags.hpp b/lib/kokkos/core/src/impl/Kokkos_Tags.hpp index 4885d37376..b7e6ba23a9 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Tags.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_Tags.hpp @@ -50,17 +50,6 @@ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- -namespace Kokkos { -//---------------------------------------------------------------------------- - -template -struct Device { - typedef ExecutionSpace execution_space; - typedef MemorySpace memory_space; - typedef Device device_type; -}; -} - //---------------------------------------------------------------------------- 
//---------------------------------------------------------------------------- @@ -103,7 +92,26 @@ template< class C > struct is_memory_traits< C , typename Impl::enable_if_type< typename C::memory_traits >::type > : public bool_< Impl::is_same< C , typename C::memory_traits >::value > {}; +} +} +namespace Kokkos { +//---------------------------------------------------------------------------- + +template< class ExecutionSpace , class MemorySpace > +struct Device { + static_assert( Impl::is_execution_space::value + , "Execution space is not valid" ); + static_assert( Impl::is_memory_space::value + , "Memory space is not valid" ); + typedef ExecutionSpace execution_space; + typedef MemorySpace memory_space; + typedef Device device_type; +}; +} + +namespace Kokkos { +namespace Impl { //---------------------------------------------------------------------------- template< class C , class Enable = void > diff --git a/lib/kokkos/core/src/impl/Kokkos_Traits.hpp b/lib/kokkos/core/src/impl/Kokkos_Traits.hpp index 52358842f5..e2e02c3fa1 100644 --- a/lib/kokkos/core/src/impl/Kokkos_Traits.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_Traits.hpp @@ -51,6 +51,21 @@ namespace Kokkos { namespace Impl { +//---------------------------------------------------------------------------- +// Help with C++11 variadic argument packs + +template< unsigned I , class ... Args > +struct variadic_type { typedef void type ; }; + +template< class T , class ... Args > +struct variadic_type< 0 , T , Args ... > + { typedef T type ; }; + +template< unsigned I , class T , class ... Args > +struct variadic_type< I , T , Args ... > + { typedef typename variadic_type< I - 1 , Args ... >::type type ; }; + +//---------------------------------------------------------------------------- /* C++11 conformal compile-time type traits utilities. * Prefer to use C++11 when portably available. 
*/ @@ -249,30 +264,51 @@ struct if_ : public if_c {}; template< typename T > struct is_integral : public integral_constant< bool , ( - Impl::is_same< T , char >::value || - Impl::is_same< T , unsigned char >::value || - Impl::is_same< T , short int >::value || - Impl::is_same< T , unsigned short int >::value || - Impl::is_same< T , int >::value || - Impl::is_same< T , unsigned int >::value || - Impl::is_same< T , long int >::value || - Impl::is_same< T , unsigned long int >::value || - Impl::is_same< T , long long int >::value || - Impl::is_same< T , unsigned long long int >::value || + std::is_same< T , char >::value || + std::is_same< T , unsigned char >::value || + std::is_same< T , short int >::value || + std::is_same< T , unsigned short int >::value || + std::is_same< T , int >::value || + std::is_same< T , unsigned int >::value || + std::is_same< T , long int >::value || + std::is_same< T , unsigned long int >::value || + std::is_same< T , long long int >::value || + std::is_same< T , unsigned long long int >::value || - Impl::is_same< T , int8_t >::value || - Impl::is_same< T , int16_t >::value || - Impl::is_same< T , int32_t >::value || - Impl::is_same< T , int64_t >::value || - Impl::is_same< T , uint8_t >::value || - Impl::is_same< T , uint16_t >::value || - Impl::is_same< T , uint32_t >::value || - Impl::is_same< T , uint64_t >::value + std::is_same< T , int8_t >::value || + std::is_same< T , int16_t >::value || + std::is_same< T , int32_t >::value || + std::is_same< T , int64_t >::value || + std::is_same< T , uint8_t >::value || + std::is_same< T , uint16_t >::value || + std::is_same< T , uint32_t >::value || + std::is_same< T , uint64_t >::value )> {}; //---------------------------------------------------------------------------- +// These 'constexpr'functions can be used as +// both regular functions and meta-function. 
+ +/**\brief There exists integral 'k' such that N = 2^k */ +KOKKOS_INLINE_FUNCTION +constexpr bool is_integral_power_of_two( const size_t N ) +{ return ( 0 < N ) && ( 0 == ( N & ( N - 1 ) ) ); } + +/**\brief Return integral 'k' such that N = 2^k, assuming valid. */ +KOKKOS_INLINE_FUNCTION +constexpr unsigned integral_power_of_two_assume_valid( const size_t N ) +{ return N == 1 ? 0 : 1 + integral_power_of_two_assume_valid( N >> 1 ); } + +/**\brief Return integral 'k' such that N = 2^k, if exists. + * If does not exist return ~0u. + */ +KOKKOS_INLINE_FUNCTION +constexpr unsigned integral_power_of_two( const size_t N ) +{ return is_integral_power_of_two(N) ? integral_power_of_two_assume_valid(N) : ~0u ; } + +//---------------------------------------------------------------------------- template < size_t N > struct is_power_of_two diff --git a/lib/kokkos/core/src/impl/Kokkos_ViewDefault.hpp b/lib/kokkos/core/src/impl/Kokkos_ViewDefault.hpp index 8334af3a3c..94c8e13c1d 100644 --- a/lib/kokkos/core/src/impl/Kokkos_ViewDefault.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_ViewDefault.hpp @@ -84,8 +84,12 @@ struct ViewAssignment< ViewDefault , ViewDefault , void > dst.m_ptr_on_device = ViewDataManagement< ViewTraits >::create_handle( src.m_ptr_on_device, src.m_tracker ); - dst.m_tracker = src.m_tracker ; - + if( dst.is_managed ) + dst.m_tracker = src.m_tracker ; + else { + dst.m_tracker = AllocationTracker(); + dst.m_management.set_unmanaged(); + } } @@ -117,7 +121,7 @@ struct ViewAssignment< ViewDefault , ViewDefault , void > size_t strides[8]; src.stride(strides); if(strides[0]!=1) { - abort("Trying to assign strided 1D View to LayoutRight or LayoutLeft which is not stride-1"); + Kokkos::abort("Trying to assign strided 1D View to LayoutRight or LayoutLeft which is not stride-1"); } dst.m_offset_map.assign( src.dimension_0(), 0, 0, 0, 0, 0, 0, 0, 0 ); @@ -125,8 +129,12 @@ struct ViewAssignment< ViewDefault , ViewDefault , void > dst.m_ptr_on_device = ViewDataManagement< 
ViewTraits >::create_handle( src.m_ptr_on_device, src.m_tracker ); - dst.m_tracker = src.m_tracker ; - + if( dst.is_managed ) + dst.m_tracker = src.m_tracker ; + else { + dst.m_tracker = AllocationTracker(); + dst.m_management.set_unmanaged(); + } } //------------------------------------ diff --git a/lib/kokkos/core/src/impl/Kokkos_ViewSupport.hpp b/lib/kokkos/core/src/impl/Kokkos_ViewSupport.hpp index 006b35923d..1d54b7bcce 100644 --- a/lib/kokkos/core/src/impl/Kokkos_ViewSupport.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_ViewSupport.hpp @@ -44,6 +44,7 @@ #ifndef KOKKOS_VIEWSUPPORT_HPP #define KOKKOS_VIEWSUPPORT_HPP +#include #include #include @@ -114,6 +115,7 @@ template< class ExecSpace , class Type , bool Initialize > struct ViewDefaultConstruct { ViewDefaultConstruct( Type * , size_t ) {} }; +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) /** \brief ViewDataHandle provides the type of the 'data handle' which the view * uses to access data with the [] operator. It also provides @@ -240,6 +242,8 @@ public: } }; +#endif /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + } // namespace Impl } // namespace Kokkos diff --git a/lib/kokkos/core/src/impl/Kokkos_ViewTileLeft.hpp b/lib/kokkos/core/src/impl/Kokkos_ViewTileLeft.hpp index 0bbb781c87..beaa288ce6 100644 --- a/lib/kokkos/core/src/impl/Kokkos_ViewTileLeft.hpp +++ b/lib/kokkos/core/src/impl/Kokkos_ViewTileLeft.hpp @@ -79,8 +79,8 @@ struct ViewOffset< ShapeType )>::type > : public ShapeType { - enum { SHIFT_0 = Impl::power_of_two::value }; - enum { SHIFT_1 = Impl::power_of_two::value }; + enum { SHIFT_0 = Impl::integral_power_of_two(N0) }; + enum { SHIFT_1 = Impl::integral_power_of_two(N1) }; enum { MASK_0 = N0 - 1 }; enum { MASK_1 = N1 - 1 }; diff --git a/lib/kokkos/core/src/impl/Kokkos_hwloc.cpp b/lib/kokkos/core/src/impl/Kokkos_hwloc.cpp index 1d173fb4fb..cb561f711c 100644 --- a/lib/kokkos/core/src/impl/Kokkos_hwloc.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_hwloc.cpp @@ -1,13 +1,13 @@ /* //@HEADER // ************************************************************************ -// +// // Kokkos v. 2.0 // Copyright (2014) Sandia Corporation -// +// // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. -// +// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -36,7 +36,7 @@ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact H. 
Carter Edwards (hcedwar@sandia.gov) -// +// // ************************************************************************ //@HEADER */ @@ -45,6 +45,7 @@ #include #include +#include #include #include @@ -253,6 +254,7 @@ hwloc_topology_t s_hwloc_topology(0); hwloc_bitmap_t s_hwloc_location(0); hwloc_bitmap_t s_process_binding(0); hwloc_bitmap_t s_core[ MAX_CORE ]; +bool s_can_bind_threads(true); struct Sentinel { ~Sentinel(); @@ -309,6 +311,22 @@ Sentinel::Sentinel() hwloc_get_cpubind( s_hwloc_topology , s_process_binding , HWLOC_CPUBIND_PROCESS ); + if ( hwloc_bitmap_iszero( s_process_binding ) ) { + std::cerr << "WARNING: Cannot detect process binding -- ASSUMING ALL processing units" << std::endl; + const int pu_depth = hwloc_get_type_depth( s_hwloc_topology, HWLOC_OBJ_PU ); + int num_pu = 1; + if ( pu_depth != HWLOC_TYPE_DEPTH_UNKNOWN ) { + num_pu = hwloc_get_nbobjs_by_depth( s_hwloc_topology, pu_depth ); + } + else { + std::cerr << "WARNING: Cannot detect number of processing units -- ASSUMING 1 (serial)." 
<< std::endl; + num_pu = 1; + } + hwloc_bitmap_set_range( s_process_binding, 0, num_pu-1); + s_can_bind_threads = false; + } + + if ( remove_core_0 ) { const hwloc_obj_t core = hwloc_get_obj_by_type( s_hwloc_topology , HWLOC_OBJ_CORE , 0 ); @@ -509,6 +527,9 @@ unsigned get_available_cores_per_numa() unsigned get_available_threads_per_core() { sentinel(); return s_core_capacity ; } +bool can_bind_threads() +{ sentinel(); return s_can_bind_threads; } + //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- @@ -676,6 +697,7 @@ namespace Kokkos { namespace hwloc { bool available() { return false ; } +bool can_bind_threads() { return false ; } unsigned get_available_numa_count() { return 1 ; } unsigned get_available_cores_per_numa() { return 1 ; } diff --git a/lib/kokkos/core/src/impl/Kokkos_spinwait.cpp b/lib/kokkos/core/src/impl/Kokkos_spinwait.cpp index e16d9c4956..aff7f29f89 100644 --- a/lib/kokkos/core/src/impl/Kokkos_spinwait.cpp +++ b/lib/kokkos/core/src/impl/Kokkos_spinwait.cpp @@ -57,8 +57,15 @@ #elif defined ( KOKKOS_HAVE_WINTHREAD ) #include #define YIELD Sleep(0) +#elif defined ( _WIN32) && defined (_MSC_VER) + /* Windows w/ Visual Studio */ + #define NOMINMAX + #include + #include +#define YIELD YieldProcessor(); #elif defined ( _WIN32 ) - #define YIELD __asm__ __volatile__("pause\n":::"memory") + /* Windows w/ Intel*/ + #define YIELD __asm__ __volatile__("pause\n":::"memory") #else #include #define YIELD sched_yield() diff --git a/lib/kokkos/core/unit_test/CMakeLists.txt b/lib/kokkos/core/unit_test/CMakeLists.txt new file mode 100644 index 0000000000..e835245e25 --- /dev/null +++ b/lib/kokkos/core/unit_test/CMakeLists.txt @@ -0,0 +1,102 @@ +# +# Add test-only library for gtest to be reused by all the subpackages +# + +SET(GTEST_SOURCE_DIR ${${PARENT_PACKAGE_NAME}_SOURCE_DIR}/tpls/gtest) + +INCLUDE_DIRECTORIES(${GTEST_SOURCE_DIR}) +TRIBITS_ADD_LIBRARY( + 
kokkos_gtest + HEADERS ${GTEST_SOURCE_DIR}/gtest/gtest.h + SOURCES ${GTEST_SOURCE_DIR}/gtest/gtest-all.cc + TESTONLY + ) + +# +# Define the tests +# + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +IF(Kokkos_ENABLE_Serial) + TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_Serial + SOURCES UnitTestMain.cpp TestSerial.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) +ENDIF() + +IF(Kokkos_ENABLE_Pthread) + TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_Threads + SOURCES UnitTestMain.cpp TestThreads.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) +ENDIF() + +IF(Kokkos_ENABLE_OpenMP) + TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_OpenMP + SOURCES UnitTestMain.cpp TestOpenMP.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) +ENDIF() + +IF(Kokkos_ENABLE_QTHREAD) + TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_Qthread + SOURCES UnitTestMain.cpp TestQthread.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) +ENDIF() + +IF(Kokkos_ENABLE_Cuda) + TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_Cuda + SOURCES UnitTestMain.cpp TestCuda.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest + ) +ENDIF() + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_Default + SOURCES UnitTestMain.cpp TestDefaultDeviceType.cpp TestDefaultDeviceTypeInit.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest +) + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_HWLOC + SOURCES UnitTestMain.cpp TestHWLOC.cpp + COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest +) + +TRIBITS_ADD_EXECUTABLE_AND_TEST( + UnitTest_AllocationTracker + SOURCES UnitTestMain.cpp TestAllocationTracker.cpp + 
COMM serial mpi + NUM_MPI_PROCS 1 + FAIL_REGULAR_EXPRESSION " FAILED " + TESTONLYLIBS kokkos_gtest +) diff --git a/lib/kokkos/core/unit_test/Makefile b/lib/kokkos/core/unit_test/Makefile index b2d3d55066..5c69c4014f 100644 --- a/lib/kokkos/core/unit_test/Makefile +++ b/lib/kokkos/core/unit_test/Makefile @@ -1,18 +1,17 @@ KOKKOS_PATH = ../.. -GTEST_PATH = ../../TPL/gtest +GTEST_PATH = ../../tpls/gtest vpath %.cpp ${KOKKOS_PATH}/core/unit_test TEST_HEADERS = $(wildcard $(KOKKOS_PATH)/core/unit_test/*.hpp) default: build_all echo "End Build" - include $(KOKKOS_PATH)/Makefile.kokkos ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) - CXX = nvcc_wrapper + CXX = $(NVCC_WRAPPER) CXXFLAGS ?= -O3 LINK = $(CXX) LDFLAGS ?= -lpthread @@ -25,8 +24,8 @@ endif KOKKOS_CXXFLAGS += -I$(GTEST_PATH) -I${KOKKOS_PATH}/core/unit_test -TEST_TARGETS = -TARGETS = +TEST_TARGETS = +TARGETS = ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) OBJ_CUDA = TestCuda.o UnitTestMain.o gtest-all.o @@ -74,13 +73,16 @@ OBJ_DEFAULTINIT = TestDefaultDeviceTypeInit.o UnitTestMain.o gtest-all.o TARGETS += KokkosCore_UnitTest_DefaultInit TEST_TARGETS += test-default-init +OBJ_SYNCHRONIC = TestSynchronic.o UnitTestMain.o gtest-all.o +TARGETS += KokkosCore_UnitTest_Synchronic +TEST_TARGETS += test-synchronic KokkosCore_UnitTest_Cuda: $(OBJ_CUDA) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_CUDA) $(KOKKOS_LIBS) $(LIB) -o KokkosCore_UnitTest_Cuda KokkosCore_UnitTest_Threads: $(OBJ_THREADS) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_THREADS) $(KOKKOS_LIBS) $(LIB) -o KokkosCore_UnitTest_Threads - + KokkosCore_UnitTest_OpenMP: $(OBJ_OPENMP) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_OPENMP) $(KOKKOS_LIBS) $(LIB) -o KokkosCore_UnitTest_OpenMP @@ -102,6 +104,9 @@ KokkosCore_UnitTest_Default: $(OBJ_DEFAULT) $(KOKKOS_LINK_DEPENDS) KokkosCore_UnitTest_DefaultInit: $(OBJ_DEFAULTINIT) $(KOKKOS_LINK_DEPENDS) $(LINK) $(KOKKOS_LDFLAGS) 
$(LDFLAGS) $(EXTRA_PATH) $(OBJ_DEFAULTINIT) $(KOKKOS_LIBS) $(LIB) -o KokkosCore_UnitTest_DefaultInit +KokkosCore_UnitTest_Synchronic: $(OBJ_SYNCHRONIC) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LDFLAGS) $(EXTRA_PATH) $(OBJ_SYNCHRONIC) $(KOKKOS_LIBS) $(LIB) -o KokkosCore_UnitTest_Synchronic + test-cuda: KokkosCore_UnitTest_Cuda ./KokkosCore_UnitTest_Cuda @@ -113,27 +118,30 @@ test-openmp: KokkosCore_UnitTest_OpenMP test-serial: KokkosCore_UnitTest_Serial ./KokkosCore_UnitTest_Serial - + test-qthread: KokkosCore_UnitTest_Qthread ./KokkosCore_UnitTest_Qthread test-hwloc: KokkosCore_UnitTest_HWLOC ./KokkosCore_UnitTest_HWLOC - + test-allocationtracker: KokkosCore_UnitTest_AllocationTracker ./KokkosCore_UnitTest_AllocationTracker - + test-default: KokkosCore_UnitTest_Default ./KokkosCore_UnitTest_Default - + test-default-init: KokkosCore_UnitTest_DefaultInit ./KokkosCore_UnitTest_DefaultInit +test-synchronic: KokkosCore_UnitTest_Synchronic + ./KokkosCore_UnitTest_Synchronic + build_all: $(TARGETS) test: $(TEST_TARGETS) - -clean: kokkos-clean + +clean: kokkos-clean rm -f *.o $(TARGETS) # Compilation rules @@ -141,6 +149,6 @@ clean: kokkos-clean %.o:%.cpp $(KOKKOS_CPP_DEPENDS) $(TEST_HEADERS) $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< -gtest-all.o:$(GTEST_PATH)/gtest/gtest-all.cc +gtest-all.o:$(GTEST_PATH)/gtest/gtest-all.cc $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $(GTEST_PATH)/gtest/gtest-all.cc diff --git a/lib/kokkos/core/unit_test/TestAggregate.hpp b/lib/kokkos/core/unit_test/TestAggregate.hpp index b16e17b4cf..c106dfd873 100644 --- a/lib/kokkos/core/unit_test/TestAggregate.hpp +++ b/lib/kokkos/core/unit_test/TestAggregate.hpp @@ -711,16 +711,27 @@ int TestViewAggregate() #else /* #if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ +#include + namespace Test { template< class DeviceType > -int TestViewAggregate() +void TestViewAggregate() { -/* - typedef Kokkos::ViewTraits< Kokkos::Array ** , DeviceType > a32_traits ; + typedef Kokkos::Array value_type ; + + typedef Kokkos::Experimental::Impl:: + ViewDataAnalysis< value_type * , Kokkos::LayoutLeft , value_type > + analysis_1d ; + + static_assert( std::is_same< typename analysis_1d::specialize , Kokkos::Array<> >::value , "" ); + + + typedef Kokkos::ViewTraits< value_type ** , DeviceType > a32_traits ; typedef Kokkos::ViewTraits< typename a32_traits::array_scalar_type , DeviceType > flat_traits ; static_assert( std::is_same< typename a32_traits::specialize , Kokkos::Array<> >::value , "" ); + static_assert( std::is_same< typename a32_traits::value_type , value_type >::value , "" ); static_assert( a32_traits::rank == 2 , "" ); static_assert( a32_traits::rank_dynamic == 2 , "" ); @@ -730,17 +741,23 @@ int TestViewAggregate() static_assert( flat_traits::dimension::N2 == 32 , "" ); - - typedef Kokkos::View< Kokkos::Array ** , DeviceType > a32_type ; + typedef typename a32_type::array_type a32_flat_type ; - + static_assert( std::is_same< typename a32_type::value_type , value_type >::value , "" ); + static_assert( std::is_same< typename a32_type::pointer_type , double * >::value , "" ); static_assert( a32_type::Rank == 2 , "" ); static_assert( a32_flat_type::Rank == 3 , "" ); -*/ - return 0 ; + a32_type x("test",4,5); + a32_flat_type y( x ); + + ASSERT_EQ( x.extent(0) , 4 ); + ASSERT_EQ( x.extent(1) , 5 ); + ASSERT_EQ( y.extent(0) , 4 ); + ASSERT_EQ( y.extent(1) , 5 ); + ASSERT_EQ( y.extent(2) , 32 ); } } diff --git a/lib/kokkos/core/unit_test/TestAllocationTracker.cpp b/lib/kokkos/core/unit_test/TestAllocationTracker.cpp index 371b0ac758..16f13ff1a8 100644 --- a/lib/kokkos/core/unit_test/TestAllocationTracker.cpp +++ b/lib/kokkos/core/unit_test/TestAllocationTracker.cpp @@ -68,6 +68,9 @@ protected: 
TEST_F( alocation_tracker, simple) { + +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + using namespace Kokkos::Impl; { @@ -111,6 +114,9 @@ TEST_F( alocation_tracker, simple) EXPECT_EQ( 1u, trackers[i].ref_count() ); } } + +#endif /* #if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ + } TEST_F( alocation_tracker, force_leaks) @@ -125,6 +131,9 @@ TEST_F( alocation_tracker, force_leaks) TEST_F( alocation_tracker, disable_reference_counting) { + +#if ! defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + using namespace Kokkos::Impl; // test ref count and label { @@ -134,12 +143,17 @@ TEST_F( alocation_tracker, disable_reference_counting) trackers[0] = AllocationTracker( MallocAllocator(), 128,"Test"); for (int i=1; i #include +#include #include #include @@ -121,13 +122,11 @@ TEST_F( cuda , memory_space ) TestMemorySpace< Kokkos::Cuda >(); } -TEST_F( cuda, spaces ) +TEST_F( cuda, uvm ) { if ( Kokkos::CudaUVMSpace::available() ) { - Kokkos::Impl::AllocationTracker tracker = Kokkos::CudaUVMSpace::allocate_and_track("uvm_ptr",sizeof(int)); - - int * uvm_ptr = (int*) tracker.alloc_ptr(); + int * uvm_ptr = (int*) Kokkos::kokkos_malloc< Kokkos::CudaUVMSpace >("uvm_ptr",sizeof(int)); *uvm_ptr = 42 ; @@ -137,6 +136,7 @@ TEST_F( cuda, spaces ) EXPECT_EQ( *uvm_ptr, int(2*42) ); + Kokkos::kokkos_free< Kokkos::CudaUVMSpace >(uvm_ptr ); } } @@ -157,6 +157,11 @@ TEST_F( cuda , impl_view_mapping ) TestViewMappingAtomic< Kokkos::Cuda >::run(); } +TEST_F( cuda , view_of_class ) +{ + TestViewMappingClassValue< Kokkos::Cuda >::run(); +} + template< class MemSpace > struct TestViewCudaTexture { @@ -284,6 +289,12 @@ TEST_F( cuda, view_api ) #endif } + +TEST_F( cuda , view_nested_view ) +{ + ::Test::view_nested_view< Kokkos::Cuda >(); +} + TEST_F( cuda, view_subview_auto_1d_left ) { TestViewSubview::test_auto_1d< Kokkos::LayoutLeft,Kokkos::Cuda >(); } @@ -361,6 +372,13 @@ TEST_F( cuda, shared_team ) TestSharedTeam< Kokkos::Cuda >(); } +#if defined (KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA) +TEST_F( 
cuda, lambda_shared_team ) +{ + TestLambdaSharedTeam< Kokkos::Cuda >(); +} +#endif + TEST_F( cuda, reduce_dynamic ) { TestReduceDynamic< long , Kokkos::Cuda >( 10000000 ); diff --git a/lib/kokkos/core/unit_test/TestDefaultDeviceType.cpp b/lib/kokkos/core/unit_test/TestDefaultDeviceType.cpp index 73e5bf85ae..1f4a2e84d6 100644 --- a/lib/kokkos/core/unit_test/TestDefaultDeviceType.cpp +++ b/lib/kokkos/core/unit_test/TestDefaultDeviceType.cpp @@ -240,7 +240,7 @@ TEST_F( defaultdevicetype , malloc ) int* data2 = (int*) Kokkos::kokkos_malloc(0); ASSERT_TRUE(data2==NULL); - Kokkos::kokkos_free(data); + Kokkos::kokkos_free(data2); } } // namespace test diff --git a/lib/kokkos/core/unit_test/TestOpenMP.cpp b/lib/kokkos/core/unit_test/TestOpenMP.cpp index 7f3a245675..483352d1e4 100644 --- a/lib/kokkos/core/unit_test/TestOpenMP.cpp +++ b/lib/kokkos/core/unit_test/TestOpenMP.cpp @@ -52,6 +52,7 @@ #include #include +#include #include #include @@ -116,6 +117,10 @@ TEST_F( openmp, view_api) { TestViewAPI< double , Kokkos::OpenMP >(); } +TEST_F( openmp , view_nested_view ) +{ + ::Test::view_nested_view< Kokkos::OpenMP >(); +} TEST_F( openmp, view_subview_auto_1d_left ) { TestViewSubview::test_auto_1d< Kokkos::LayoutLeft,Kokkos::OpenMP >(); @@ -208,6 +213,11 @@ TEST_F( openmp, team_shared_request) { TestSharedTeam< Kokkos::OpenMP >(); } +#if defined(KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA) && !defined(KOKKOS_HAVE_CUDA) +TEST_F( openmp, team_lambda_shared_request) { + TestLambdaSharedTeam< Kokkos::OpenMP >(); +} +#endif TEST_F( openmp , atomics ) { diff --git a/lib/kokkos/core/unit_test/TestQthread.cpp b/lib/kokkos/core/unit_test/TestQthread.cpp index ff40536230..edcf7e90ea 100644 --- a/lib/kokkos/core/unit_test/TestQthread.cpp +++ b/lib/kokkos/core/unit_test/TestQthread.cpp @@ -54,6 +54,7 @@ #include #include +#include #include #include @@ -99,6 +100,11 @@ TEST_F( qthread, view_api) { TestViewAPI< double , Kokkos::Qthread >(); } +TEST_F( qthread , view_nested_view ) +{ + 
::Test::view_nested_view< Kokkos::Qthread >(); +} + TEST_F( qthread , range_tag ) { TestRange< Kokkos::Qthread >::test_for(1000); diff --git a/lib/kokkos/core/unit_test/TestSerial.cpp b/lib/kokkos/core/unit_test/TestSerial.cpp index 4514492e4d..212a96fdc7 100644 --- a/lib/kokkos/core/unit_test/TestSerial.cpp +++ b/lib/kokkos/core/unit_test/TestSerial.cpp @@ -209,6 +209,12 @@ TEST_F( serial , team_shared_request) { TestSharedTeam< Kokkos::Serial >(); } +#if defined(KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA) && !defined(KOKKOS_HAVE_CUDA) +TEST_F( serial , team_lambda_shared_request) { + TestLambdaSharedTeam< Kokkos::Serial >(); +} +#endif + TEST_F( serial , team_scan ) { TestScanTeam< Kokkos::Serial >( 10 ); diff --git a/lib/kokkos/core/unit_test/TestSharedAlloc.hpp b/lib/kokkos/core/unit_test/TestSharedAlloc.hpp index 060f5f4605..ab5c240ac4 100644 --- a/lib/kokkos/core/unit_test/TestSharedAlloc.hpp +++ b/lib/kokkos/core/unit_test/TestSharedAlloc.hpp @@ -167,27 +167,37 @@ void test_shared_alloc() // Copy destruction function object into the allocation record rec->m_destroy = SharedAllocDestroy( & destroy_count ); + ASSERT_EQ( rec->use_count() , 0 ); + // Start tracking, increments the use count from 0 to 1 - Tracker track( rec ); + Tracker track ; + + track.assign_allocated_record_to_uninitialized( rec ); ASSERT_EQ( rec->use_count() , 1 ); + ASSERT_EQ( track.use_count() , 1 ); // Verify construction / destruction increment for ( size_t i = 0 ; i < N ; ++i ) { ASSERT_EQ( rec->use_count() , 1 ); { - Tracker local_tracker( rec ); + Tracker local_tracker ; + local_tracker.assign_allocated_record_to_uninitialized( rec ); ASSERT_EQ( rec->use_count() , 2 ); + ASSERT_EQ( local_tracker.use_count() , 2 ); } ASSERT_EQ( rec->use_count() , 1 ); + ASSERT_EQ( track.use_count() , 1 ); } Kokkos::parallel_for( range , [=]( size_t i ){ - Tracker local_tracker( rec ); + Tracker local_tracker ; + local_tracker.assign_allocated_record_to_uninitialized( rec ); ASSERT_GT( rec->use_count() , 1 ); 
}); ASSERT_EQ( rec->use_count() , 1 ); + ASSERT_EQ( track.use_count() , 1 ); // Destruction of 'track' object deallocates the 'rec' and invokes the destroy function object. } diff --git a/lib/kokkos/core/unit_test/TestSynchronic.cpp b/lib/kokkos/core/unit_test/TestSynchronic.cpp new file mode 100644 index 0000000000..9121dc15a1 --- /dev/null +++ b/lib/kokkos/core/unit_test/TestSynchronic.cpp @@ -0,0 +1,448 @@ +/* + +Copyright (c) 2014, NVIDIA Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +//#undef _WIN32_WINNT +//#define _WIN32_WINNT 0x0602 + +#if defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__APPLE__) + +// Skip for now + +#else + +#include + +#ifdef USEOMP +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +//#include
+//#undef __SYNCHRONIC_COMPATIBLE + +#include +#include + +#include "TestSynchronic.hpp" + +// Uncomment to allow test to dump output +//#define VERBOSE_TEST + +namespace Test { + +unsigned next_table[] = + { + 0, 1, 2, 3, //0-3 + 4, 4, 6, 6, //4-7 + 8, 8, 8, 8, //8-11 + 12, 12, 12, 12, //12-15 + 16, 16, 16, 16, //16-19 + 16, 16, 16, 16, //20-23 + 24, 24, 24, 24, //24-27 + 24, 24, 24, 24, //28-31 + 32, 32, 32, 32, //32-35 + 32, 32, 32, 32, //36-39 + 40, 40, 40, 40, //40-43 + 40, 40, 40, 40, //44-47 + 48, 48, 48, 48, //48-51 + 48, 48, 48, 48, //52-55 + 56, 56, 56, 56, //56-59 + 56, 56, 56, 56, //60-63 + }; + +//change this if you want to allow oversubscription of the system, by default only the range {1-(system size)} is tested +#define FOR_GAUNTLET(x) for(unsigned x = (std::min)(std::thread::hardware_concurrency()*8,unsigned(sizeof(next_table)/sizeof(unsigned))); x; x = next_table[x-1]) + +//set this to override the benchmark of barriers to use OMP barriers instead of n3998 std::barrier +//#define USEOMP + +#if defined(__SYNCHRONIC_COMPATIBLE) + #define PREFIX "futex-" +#else + #define PREFIX "backoff-" +#endif + +//this test uses a custom Mersenne twister to eliminate implementation variation +MersenneTwister mt; + +int dummya = 1, dummyb =1; + +int dummy1 = 1; +std::atomic dummy2(1); +std::atomic dummy3(1); + +double time_item(int const count = (int)1E8) { + + clock_t const start = clock(); + + for(int i = 0;i < count; ++i) + mt.integer(); + + clock_t const end = clock(); + double elapsed_seconds = (end - start) / double(CLOCKS_PER_SEC); + + return elapsed_seconds / count; +} +double time_nil(int const count = (int)1E08) { + + clock_t const start = clock(); + + dummy3 = count; + for(int i = 0;i < (int)1E6; ++i) { + if(dummy1) { + // Do some work while holding the lock + int workunits = dummy3;//(int) (mtc.poissonInterval((float)num_items_critical) + 0.5f); + for (int j = 1; j < workunits; j++) + dummy1 &= j; // Do one work unit + 
dummy2.fetch_add(dummy1,std::memory_order_relaxed); + } + } + + clock_t const end = clock(); + double elapsed_seconds = (end - start) / double(CLOCKS_PER_SEC); + + return elapsed_seconds / count; +} + + +template +void testmutex_inner(mutex_type& m, std::atomic& t,std::atomic& wc,std::atomic& wnc, int const num_iterations, + int const num_items_critical, int const num_items_noncritical, MersenneTwister& mtc, MersenneTwister& mtnc, bool skip) { + + for(int k = 0; k < num_iterations; ++k) { + + if(num_items_noncritical) { + // Do some work without holding the lock + int workunits = num_items_noncritical;//(int) (mtnc.poissonInterval((float)num_items_noncritical) + 0.5f); + for (int i = 1; i < workunits; i++) + mtnc.integer(); // Do one work unit + wnc.fetch_add(workunits,std::memory_order_relaxed); + } + + t.fetch_add(1,std::memory_order_relaxed); + + if(!skip) { + std::unique_lock l(m); + if(num_items_critical) { + // Do some work while holding the lock + int workunits = num_items_critical;//(int) (mtc.poissonInterval((float)num_items_critical) + 0.5f); + for (int i = 1; i < workunits; i++) + mtc.integer(); // Do one work unit + wc.fetch_add(workunits,std::memory_order_relaxed); + } + } + } +} +template +void testmutex_outer(std::map>& results, std::string const& name, double critical_fraction, double critical_duration) { + + std::ostringstream truename; + truename << name << " (f=" << critical_fraction << ",d=" << critical_duration << ")"; + + std::vector& data = results[truename.str()]; + + double const workItemTime = time_item() , + nilTime = time_nil(); + + int const num_items_critical = (critical_duration <= 0 ? 0 : (std::max)( int(critical_duration / workItemTime + 0.5), int(100 * nilTime / workItemTime + 0.5))), + num_items_noncritical = (num_items_critical <= 0 ? 
0 : int( ( 1 - critical_fraction ) * num_items_critical / critical_fraction + 0.5 )); + + FOR_GAUNTLET(num_threads) { + + //Kokkos::Impl::portable_sleep(std::chrono::microseconds(2000000)); + + int const num_iterations = (num_items_critical + num_items_noncritical != 0) ? +#ifdef __SYNCHRONIC_JUST_YIELD + int( 1 / ( 8 * workItemTime ) / (num_items_critical + num_items_noncritical) / num_threads + 0.5 ) : +#else + int( 1 / ( 8 * workItemTime ) / (num_items_critical + num_items_noncritical) / num_threads + 0.5 ) : +#endif +#ifdef WIN32 + int( 1 / workItemTime / (20 * num_threads * num_threads) ); +#else + int( 1 / workItemTime / (200 * num_threads * num_threads) ); +#endif + +#ifdef VERBOSE_TEST + std::cerr << "running " << truename.str() << " #" << num_threads << ", " << num_iterations << " * " << num_items_noncritical << "\n" << std::flush; +#endif + + + std::atomic t[2], wc[2], wnc[2]; + + clock_t start[2], end[2]; + for(int pass = 0; pass < 2; ++pass) { + + t[pass] = 0; + wc[pass] = 0; + wnc[pass] = 0; + + srand(num_threads); + std::vector randomsnc(num_threads), + randomsc(num_threads); + + mutex_type m; + + start[pass] = clock(); +#ifdef USEOMP + omp_set_num_threads(num_threads); + std::atomic _j(0); + #pragma omp parallel + { + int const j = _j.fetch_add(1,std::memory_order_relaxed); + testmutex_inner(m, t[pass], wc[pass], wnc[pass], num_iterations, num_items_critical, num_items_noncritical, randomsc[j], randomsnc[j], pass==0); + num_threads = omp_get_num_threads(); + } +#else + std::vector threads(num_threads); + for(unsigned j = 0; j < num_threads; ++j) + threads[j] = new std::thread([&,j](){ + testmutex_inner(m, t[pass], wc[pass], wnc[pass], num_iterations, num_items_critical, num_items_noncritical, randomsc[j], randomsnc[j], pass==0); + } + ); + for(unsigned j = 0; j < num_threads; ++j) { + threads[j]->join(); + delete threads[j]; + } +#endif + end[pass] = clock(); + } + if(t[0] != t[1]) throw std::string("mismatched iteration counts"); + if(wnc[0] != 
wnc[1]) throw std::string("mismatched work item counts"); + + double elapsed_seconds_0 = (end[0] - start[0]) / double(CLOCKS_PER_SEC), + elapsed_seconds_1 = (end[1] - start[1]) / double(CLOCKS_PER_SEC); + double time = (elapsed_seconds_1 - elapsed_seconds_0 - wc[1]*workItemTime) / num_iterations; + + data.push_back(time); +#ifdef VERBOSE_TEST + std::cerr << truename.str() << " : " << num_threads << "," << elapsed_seconds_1 / num_iterations << " - " << elapsed_seconds_0 / num_iterations << " - " << wc[1]*workItemTime/num_iterations << " = " << time << " \n"; +#endif + } +} + +template +void testbarrier_inner(barrier_type& b, int const num_threads, int const j, std::atomic& t,std::atomic& w, + int const num_iterations_odd, int const num_iterations_even, + int const num_items_noncritical, MersenneTwister& arg_mt, bool skip) { + + for(int k = 0; k < (std::max)(num_iterations_even,num_iterations_odd); ++k) { + + if(k >= (~j & 0x1 ? num_iterations_odd : num_iterations_even )) { + if(!skip) + b.arrive_and_drop(); + break; + } + + if(num_items_noncritical) { + // Do some work without holding the lock + int workunits = (int) (arg_mt.poissonInterval((float)num_items_noncritical) + 0.5f); + for (int i = 1; i < workunits; i++) + arg_mt.integer(); // Do one work unit + w.fetch_add(workunits,std::memory_order_relaxed); + } + + t.fetch_add(1,std::memory_order_relaxed); + + if(!skip) { + int const thiscount = (std::min)(k+1,num_iterations_odd)*((num_threads>>1)+(num_threads&1)) + (std::min)(k+1,num_iterations_even)*(num_threads>>1); + if(t.load(std::memory_order_relaxed) > thiscount) { + std::cerr << "FAILURE: some threads have run ahead of the barrier (" << t.load(std::memory_order_relaxed) << ">" << thiscount << ").\n"; + EXPECT_TRUE(false); + } +#ifdef USEOMP + #pragma omp barrier +#else + b.arrive_and_wait(); +#endif + if(t.load(std::memory_order_relaxed) < thiscount) { + std::cerr << "FAILURE: some threads have fallen behind the barrier (" << t.load(std::memory_order_relaxed) 
<< "<" << thiscount << ").\n"; + EXPECT_TRUE(false); + } + } + } +} +template +void testbarrier_outer(std::map>& results, std::string const& name, double barrier_frequency, double phase_duration, bool randomIterations = false) { + + std::vector& data = results[name]; + + double const workItemTime = time_item(); + int const num_items_noncritical = int( phase_duration / workItemTime + 0.5 ); + + FOR_GAUNTLET(num_threads) { + + int const num_iterations = int( barrier_frequency ); +#ifdef VERBOSE_TEST + std::cerr << "running " << name << " #" << num_threads << ", " << num_iterations << " * " << num_items_noncritical << "\r" << std::flush; +#endif + + srand(num_threads); + + MersenneTwister local_mt; + int const num_iterations_odd = randomIterations ? int(local_mt.poissonInterval((float)num_iterations)+0.5f) : num_iterations, + num_iterations_even = randomIterations ? int(local_mt.poissonInterval((float)num_iterations)+0.5f) : num_iterations; + + std::atomic t[2], w[2]; + std::chrono::time_point start[2], end[2]; + for(int pass = 0; pass < 2; ++pass) { + + t[pass] = 0; + w[pass] = 0; + + srand(num_threads); + std::vector randoms(num_threads); + + barrier_type b(num_threads); + + start[pass] = std::chrono::high_resolution_clock::now(); +#ifdef USEOMP + omp_set_num_threads(num_threads); + std::atomic _j(0); + #pragma omp parallel + { + int const j = _j.fetch_add(1,std::memory_order_relaxed); + testbarrier_inner(b, num_threads, j, t[pass], w[pass], num_iterations_odd, num_iterations_even, num_items_noncritical, randoms[j], pass==0); + num_threads = omp_get_num_threads(); + } +#else + std::vector threads(num_threads); + for(unsigned j = 0; j < num_threads; ++j) + threads[j] = new std::thread([&,j](){ + testbarrier_inner(b, num_threads, j, t[pass], w[pass], num_iterations_odd, num_iterations_even, num_items_noncritical, randoms[j], pass==0); + }); + for(unsigned j = 0; j < num_threads; ++j) { + threads[j]->join(); + delete threads[j]; + } +#endif + end[pass] = 
std::chrono::high_resolution_clock::now(); + } + + if(t[0] != t[1]) throw std::string("mismatched iteration counts"); + if(w[0] != w[1]) throw std::string("mismatched work item counts"); + + int const phases = (std::max)(num_iterations_odd, num_iterations_even); + + std::chrono::duration elapsed_seconds_0 = end[0]-start[0], + elapsed_seconds_1 = end[1]-start[1]; + double const time = (elapsed_seconds_1.count() - elapsed_seconds_0.count()) / phases; + + data.push_back(time); +#ifdef VERBOSE_TEST + std::cerr << name << " : " << num_threads << "," << elapsed_seconds_1.count() / phases << " - " << elapsed_seconds_0.count() / phases << " = " << time << " \n"; +#endif + } +} + +template +struct mutex_tester; +template +struct mutex_tester { + static void run(std::map>& results, std::string const name[], double critical_fraction, double critical_duration) { + testmutex_outer(results, *name, critical_fraction, critical_duration); + } +}; +template +struct mutex_tester { + static void run(std::map>& results, std::string const name[], double critical_fraction, double critical_duration) { + mutex_tester::run(results, name, critical_fraction, critical_duration); + mutex_tester::run(results, ++name, critical_fraction, critical_duration); + } +}; + +TEST( synchronic, main ) +{ + //warm up + time_item(); + + //measure up +#ifdef VERBOSE_TEST + std::cerr << "measuring work item speed...\r"; + std::cerr << "work item speed is " << time_item() << " per item, nil is " << time_nil() << "\n"; +#endif + try { + + std::pair testpoints[] = { {1, 0}, /*{1E-1, 10E-3}, {5E-1, 2E-6}, {3E-1, 50E-9},*/ }; + for(auto x : testpoints ) { + + std::map> results; + + //testbarrier_outer(results, PREFIX"bar 1khz 100us", 1E3, x.second); + + std::string const names[] = { + PREFIX"tkt", PREFIX"mcs", PREFIX"ttas", PREFIX"std" +#ifdef WIN32 + ,PREFIX"srw" +#endif + }; + + //run --> + + mutex_tester< + ticket_mutex, mcs_mutex, ttas_mutex, std::mutex +#ifdef WIN32 + ,srw_mutex +#endif + >::run(results, 
names, x.first, x.second); + + //<-- run + +#ifdef VERBOSE_TEST + std::cout << "threads"; + for(auto & i : results) + std::cout << ",\"" << i.first << '\"'; + std::cout << std::endl; + int j = 0; + FOR_GAUNTLET(num_threads) { + std::cout << num_threads; + for(auto & i : results) + std::cout << ',' << i.second[j]; + std::cout << std::endl; + ++j; + } +#endif + } + } + catch(std::string & e) { + std::cerr << "EXCEPTION : " << e << std::endl; + EXPECT_TRUE( false ); + } +} + +} // namespace Test + +#endif diff --git a/lib/kokkos/core/unit_test/TestSynchronic.hpp b/lib/kokkos/core/unit_test/TestSynchronic.hpp new file mode 100644 index 0000000000..d820129e8b --- /dev/null +++ b/lib/kokkos/core/unit_test/TestSynchronic.hpp @@ -0,0 +1,240 @@ +/* + +Copyright (c) 2014, NVIDIA Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef TEST_SYNCHRONIC_HPP +#define TEST_SYNCHRONIC_HPP + +#include +#include + +namespace Test { + +template +struct dumb_mutex { + + dumb_mutex () : locked(0) { + } + + void lock() { + while(1) { + bool state = false; + if (locked.compare_exchange_weak(state,true,std::memory_order_acquire)) { + break; + } + while (locked.load(std::memory_order_relaxed)) { + if (!truly) { + Kokkos::Impl::portable_yield(); + } + } + } + } + + void unlock() { + locked.store(false,std::memory_order_release); + } + +private : + std::atomic locked; +}; + +#ifdef WIN32 +#include +#include +#include +struct srw_mutex { + + srw_mutex () { + InitializeSRWLock(&_lock); + } + + void lock() { + AcquireSRWLockExclusive(&_lock); + } + void unlock() { + ReleaseSRWLockExclusive(&_lock); + } + +private : + SRWLOCK _lock; +}; +#endif + +struct ttas_mutex { + + ttas_mutex() : locked(false) { + } + + ttas_mutex(const ttas_mutex&) = delete; + ttas_mutex& operator=(const ttas_mutex&) = delete; + + void lock() { + for(int i = 0;; ++i) { + bool state = false; + if(locked.compare_exchange_weak(state,true,std::memory_order_relaxed,Kokkos::Impl::notify_none)) + break; + locked.expect_update(true); + } + std::atomic_thread_fence(std::memory_order_acquire); + } + void unlock() { + locked.store(false,std::memory_order_release); + } + +private : + Kokkos::Impl::synchronic locked; +}; + +struct ticket_mutex { + + ticket_mutex() : active(0), queue(0) { + } + + ticket_mutex(const 
ticket_mutex&) = delete; + ticket_mutex& operator=(const ticket_mutex&) = delete; + + void lock() { + int const me = queue.fetch_add(1, std::memory_order_relaxed); + while(me != active.load_when_equal(me, std::memory_order_acquire)) + ; + } + + void unlock() { + active.fetch_add(1,std::memory_order_release); + } +private : + Kokkos::Impl::synchronic active; + std::atomic queue; +}; + +struct mcs_mutex { + + mcs_mutex() : head(nullptr) { + } + + mcs_mutex(const mcs_mutex&) = delete; + mcs_mutex& operator=(const mcs_mutex&) = delete; + + struct unique_lock { + + unique_lock(mcs_mutex & arg_m) : m(arg_m), next(nullptr), ready(false) { + + unique_lock * const h = m.head.exchange(this,std::memory_order_acquire); + if(__builtin_expect(h != nullptr,0)) { + h->next.store(this,std::memory_order_seq_cst,Kokkos::Impl::notify_one); + while(!ready.load_when_not_equal(false,std::memory_order_acquire)) + ; + } + } + + unique_lock(const unique_lock&) = delete; + unique_lock& operator=(const unique_lock&) = delete; + + ~unique_lock() { + unique_lock * h = this; + if(__builtin_expect(!m.head.compare_exchange_strong(h,nullptr,std::memory_order_release, std::memory_order_relaxed),0)) { + unique_lock * n = next.load(std::memory_order_relaxed); + while(!n) + n = next.load_when_not_equal(n,std::memory_order_relaxed); + n->ready.store(true,std::memory_order_release,Kokkos::Impl::notify_one); + } + } + + private: + mcs_mutex & m; + Kokkos::Impl::synchronic next; + Kokkos::Impl::synchronic ready; + }; + +private : + std::atomic head; +}; + +} + +namespace std { +template<> +struct unique_lock : Test::mcs_mutex::unique_lock { + unique_lock(Test::mcs_mutex & arg_m) : Test::mcs_mutex::unique_lock(arg_m) { + } + unique_lock(const unique_lock&) = delete; + unique_lock& operator=(const unique_lock&) = delete; +}; + +} + +/* #include */ +#include + +namespace Test { + +//------------------------------------- +// MersenneTwister +//------------------------------------- +#define MT_IA 397 +#define 
MT_LEN 624 + +class MersenneTwister +{ + volatile unsigned long m_buffer[MT_LEN][64/sizeof(unsigned long)]; + volatile int m_index; + +public: + MersenneTwister() { + for (int i = 0; i < MT_LEN; i++) + m_buffer[i][0] = rand(); + m_index = 0; + for (int i = 0; i < MT_LEN * 100; i++) + integer(); + } + unsigned long integer() { + // Indices + int i = m_index; + int i2 = m_index + 1; if (i2 >= MT_LEN) i2 = 0; // wrap-around + int j = m_index + MT_IA; if (j >= MT_LEN) j -= MT_LEN; // wrap-around + + // Twist + unsigned long s = (m_buffer[i][0] & 0x80000000) | (m_buffer[i2][0] & 0x7fffffff); + unsigned long r = m_buffer[j][0] ^ (s >> 1) ^ ((s & 1) * 0x9908B0DF); + m_buffer[m_index][0] = r; + m_index = i2; + + // Swizzle + r ^= (r >> 11); + r ^= (r << 7) & 0x9d2c5680UL; + r ^= (r << 15) & 0xefc60000UL; + r ^= (r >> 18); + return r; + } + float poissonInterval(float ooLambda) { + return -logf(1.0f - integer() * 2.3283e-10f) * ooLambda; + } +}; + +} // namespace Test + +#endif //TEST_HPP diff --git a/lib/kokkos/core/unit_test/TestTeam.hpp b/lib/kokkos/core/unit_test/TestTeam.hpp index 4849f18dfb..fb001917a3 100644 --- a/lib/kokkos/core/unit_test/TestTeam.hpp +++ b/lib/kokkos/core/unit_test/TestTeam.hpp @@ -461,6 +461,71 @@ struct TestSharedTeam { } }; +#if defined (KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA) + +template< class ExecSpace > +struct TestLambdaSharedTeam { + + TestLambdaSharedTeam() + { run(); } + + void run() + { + typedef Test::SharedTeamFunctor Functor ; + typedef Kokkos::View< typename Functor::value_type , Kokkos::HostSpace , Kokkos::MemoryUnmanaged > result_type ; + typedef typename ExecSpace::scratch_memory_space shmem_space ; + + // tbd: MemoryUnmanaged should be the default for shared memory space + typedef Kokkos::View shared_int_array_type ; + + const int SHARED_COUNT = 1000; + int team_size = 1; +#ifdef KOKKOS_HAVE_CUDA + if(std::is_same::value) + team_size = 128; +#endif + Kokkos::TeamPolicy< ExecSpace > team_exec( 8192 / team_size , team_size , + 
Kokkos::Experimental::TeamScratchRequest(SHARED_COUNT*2*sizeof(int))); + + typename Functor::value_type error_count = 0 ; + + Kokkos::parallel_reduce( team_exec , KOKKOS_LAMBDA + ( const typename Kokkos::TeamPolicy< ExecSpace >::member_type & ind , int & update ) { + + const shared_int_array_type shared_A( ind.team_shmem() , SHARED_COUNT ); + const shared_int_array_type shared_B( ind.team_shmem() , SHARED_COUNT ); + + if ((shared_A.ptr_on_device () == NULL && SHARED_COUNT > 0) || + (shared_B.ptr_on_device () == NULL && SHARED_COUNT > 0)) { + printf ("Failed to allocate shared memory of size %lu\n", + static_cast (SHARED_COUNT)); + ++update; // failure to allocate is an error + } else { + for ( int i = ind.team_rank() ; i < SHARED_COUNT ; i += ind.team_size() ) { + shared_A[i] = i + ind.league_rank(); + shared_B[i] = 2 * i + ind.league_rank(); + } + + ind.team_barrier(); + + if ( ind.team_rank() + 1 == ind.team_size() ) { + for ( int i = 0 ; i < SHARED_COUNT ; ++i ) { + if ( shared_A[i] != i + ind.league_rank() ) { + ++update ; + } + if ( shared_B[i] != 2 * i + ind.league_rank() ) { + ++update ; + } + } + } + } + }, result_type( & error_count ) ); + + ASSERT_EQ( error_count , 0 ); + } +}; + +#endif } /*--------------------------------------------------------------------------*/ diff --git a/lib/kokkos/core/unit_test/TestThreads.cpp b/lib/kokkos/core/unit_test/TestThreads.cpp index b254aacaf8..772c822552 100644 --- a/lib/kokkos/core/unit_test/TestThreads.cpp +++ b/lib/kokkos/core/unit_test/TestThreads.cpp @@ -60,6 +60,7 @@ #include #include +#include #include #include @@ -133,6 +134,16 @@ TEST_F( threads , init ) { ; } +TEST_F( threads , dispatch ) +{ + const int repeat = 100 ; + for ( int i = 0 ; i < repeat ; ++i ) { + for ( int j = 0 ; j < repeat ; ++j ) { + Kokkos::parallel_for( Kokkos::RangePolicy< Kokkos::Threads >(0,j) + , KOKKOS_LAMBDA( int ) {} ); + }} +} + TEST_F( threads , impl_shared_alloc ) { test_shared_alloc< Kokkos::HostSpace , Kokkos::Threads >(); } 
@@ -153,6 +164,11 @@ TEST_F( threads, view_api) { TestViewAPI< double , Kokkos::Threads >(); } +TEST_F( threads , view_nested_view ) +{ + ::Test::view_nested_view< Kokkos::Threads >(); +} + TEST_F( threads, view_subview_auto_1d_left ) { TestViewSubview::test_auto_1d< Kokkos::LayoutLeft,Kokkos::Threads >(); } @@ -248,6 +264,12 @@ TEST_F( threads, team_shared_request) { TestSharedTeam< Kokkos::Threads >(); } +#if defined(KOKKOS_HAVE_CXX11_DISPATCH_LAMBDA) && !defined(KOKKOS_HAVE_CUDA) +TEST_F( threads, team_lambda_shared_request) { + TestLambdaSharedTeam< Kokkos::Threads >(); +} +#endif + TEST_F( threads , view_remap ) { enum { N0 = 3 , N1 = 2 , N2 = 8 , N3 = 9 }; diff --git a/lib/kokkos/core/unit_test/TestViewAPI.hpp b/lib/kokkos/core/unit_test/TestViewAPI.hpp index 1aeab1e41c..7b4dac679a 100644 --- a/lib/kokkos/core/unit_test/TestViewAPI.hpp +++ b/lib/kokkos/core/unit_test/TestViewAPI.hpp @@ -1038,9 +1038,15 @@ public: dx = dView4( "dx" , N0 ); dy = dView4( "dy" , N0 ); - + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(1) ); + #endif dView4_unmanaged unmanaged_dx = dx; + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(1) ); + #endif + dView4_unmanaged unmanaged_from_ptr_dx = dView4_unmanaged(dx.ptr_on_device(), dx.dimension_0(), dx.dimension_1(), @@ -1057,6 +1063,36 @@ public: } const_dView4 const_dx = dx ; + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(2) ); + #endif + + { + const_dView4 const_dx2; + const_dx2 = const_dx; + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(3) ); + #endif + + const_dx2 = dy; + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(2) ); + #endif + + const_dView4 const_dx3(dx); + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(3) ); + #endif + + dView4_unmanaged dx4_unmanaged(dx); + #ifndef 
KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(3) ); + #endif + } + + #ifndef KOKKOS_USING_EXPERIMENTAL_VIEW + ASSERT_EQ( dx.tracker().ref_count() , size_t(2) ); + #endif ASSERT_FALSE( dx.is_null() ); diff --git a/lib/kokkos/core/unit_test/TestViewMapping.hpp b/lib/kokkos/core/unit_test/TestViewMapping.hpp index e380984107..a184b70e61 100644 --- a/lib/kokkos/core/unit_test/TestViewMapping.hpp +++ b/lib/kokkos/core/unit_test/TestViewMapping.hpp @@ -53,17 +53,6 @@ namespace Test { -template< class RangeType > -void test_view_range( const size_t N , const RangeType & range , const size_t begin , const size_t dim ) -{ - typedef Kokkos::Experimental::Impl::ViewOffsetRange< RangeType > query ; - - ASSERT_EQ( query::begin( range ) , begin ); - ASSERT_EQ( query::dimension( N , range ) , dim ); - ASSERT_EQ( query::is_range , dim != 0 ); -} - - template< class ExecSpace > void test_view_mapping() { @@ -397,6 +386,38 @@ void test_view_mapping() //---------------------------------------- // Subview + { + // Mapping rank 4 to rank 3 + typedef Kokkos::Experimental::Impl::SubviewExtents<4,3> SubviewExtents ; + + constexpr int N0 = 1000 ; + constexpr int N1 = 2000 ; + constexpr int N2 = 3000 ; + constexpr int N3 = 4000 ; + + Kokkos::Experimental::Impl::ViewDimension dim ; + + SubviewExtents tmp( dim + , N0 / 2 + , Kokkos::Experimental::ALL + , std::pair( N2 / 4 , 10 + N2 / 4 ) + , Kokkos::pair( N3 / 4 , 20 + N3 / 4 ) + ); + + ASSERT_EQ( tmp.domain_offset(0) , N0 / 2 ); + ASSERT_EQ( tmp.domain_offset(1) , 0 ); + ASSERT_EQ( tmp.domain_offset(2) , N2 / 4 ); + ASSERT_EQ( tmp.domain_offset(3) , N3 / 4 ); + + ASSERT_EQ( tmp.range_index(0) , 1 ); + ASSERT_EQ( tmp.range_index(1) , 2 ); + ASSERT_EQ( tmp.range_index(2) , 3 ); + + ASSERT_EQ( tmp.range_extent(0) , N1 ); + ASSERT_EQ( tmp.range_extent(1) , 10 ); + ASSERT_EQ( tmp.range_extent(2) , 20 ); + } + //---------------------------------------- { constexpr int N0 = 2000 ; constexpr int N1 = 300 ; @@ -409,7 
+430,14 @@ void test_view_mapping() left_s0_s0_s4 dyn_off3( std::integral_constant(), N0, N1, 0, 0, 0, 0, 0, 0 ); - stride_s0_s0_s0 stride3( dyn_off3 , sub_N0 , sub_N1 , sub_N2 , 0 , 0 , 0 , 0 , 0 ); + Kokkos::Experimental::Impl::SubviewExtents< 3 , 3 > + sub( dyn_off3.m_dim + , Kokkos::pair(0,sub_N0) + , Kokkos::pair(0,sub_N1) + , Kokkos::pair(0,sub_N2) + ); + + stride_s0_s0_s0 stride3( dyn_off3 , sub ); ASSERT_EQ( stride3.dimension_0() , sub_N0 ); ASSERT_EQ( stride3.dimension_1() , sub_N1 ); @@ -440,7 +468,14 @@ void test_view_mapping() right_s0_s0_s4 dyn_off3( std::integral_constant(), N0, N1, 0, 0, 0, 0, 0, 0 ); - stride_s0_s0_s0 stride3( dyn_off3 , sub_N0 , sub_N1 , sub_N2 , 0 , 0 , 0 , 0 , 0 ); + Kokkos::Experimental::Impl::SubviewExtents< 3 , 3 > + sub( dyn_off3.m_dim + , Kokkos::pair(0,sub_N0) + , Kokkos::pair(0,sub_N1) + , Kokkos::pair(0,sub_N2) + ); + + stride_s0_s0_s0 stride3( dyn_off3 , sub ); ASSERT_EQ( stride3.dimension_0() , sub_N0 ); ASSERT_EQ( stride3.dimension_1() , sub_N1 ); @@ -459,17 +494,16 @@ void test_view_mapping() }}} } - //---------------------------------------- - { - constexpr int N = 1000 ; - - test_view_range( N , N / 2 , N / 2 , 0 ); - test_view_range( N , Kokkos::Experimental::ALL , 0 , N ); - test_view_range( N , std::pair( N / 4 , 10 + N / 4 ) , N / 4 , 10 ); - test_view_range( N , Kokkos::pair( N / 4 , 10 + N / 4 ) , N / 4 , 10 ); - } //---------------------------------------- // view data analysis + { + using namespace Kokkos::Experimental::Impl ; + static_assert( rank_dynamic<>::value == 0 , "" ); + static_assert( rank_dynamic<1>::value == 0 , "" ); + static_assert( rank_dynamic<0>::value == 1 , "" ); + static_assert( rank_dynamic<0,1>::value == 1 , "" ); + static_assert( rank_dynamic<0,0,1>::value == 2 , "" ); + } { using namespace Kokkos::Experimental::Impl ; @@ -491,7 +525,9 @@ void test_view_mapping() static_assert( a_const_int_r5::dimension::rank == 5 , "" ); static_assert( a_const_int_r5::dimension::rank_dynamic == 2 , "" 
); + static_assert( std::is_same< typename a_const_int_r5::dimension , ViewDimension<0,0,4,5,6> >::value , "" ); + static_assert( std::is_same< typename a_const_int_r5::non_const_value_type , int >::value , "" ); static_assert( a_int_r5::dimension::rank == 5 , "" ); @@ -510,18 +546,18 @@ void test_view_mapping() static_assert( a_int_r5::dimension::rank == 5 , "" ); static_assert( a_int_r5::dimension::rank_dynamic == 3 , "" ); - static_assert( a_int_r5::dimension::arg_N0 == 0 , "" ); - static_assert( a_int_r5::dimension::arg_N1 == 0 , "" ); - static_assert( a_int_r5::dimension::arg_N2 == 0 , "" ); - static_assert( a_int_r5::dimension::arg_N3 == 3 , "" ); - static_assert( a_int_r5::dimension::arg_N4 == 4 , "" ); + static_assert( a_int_r5::dimension::ArgN0 == 0 , "" ); + static_assert( a_int_r5::dimension::ArgN1 == 0 , "" ); + static_assert( a_int_r5::dimension::ArgN2 == 0 , "" ); + static_assert( a_int_r5::dimension::ArgN3 == 3 , "" ); + static_assert( a_int_r5::dimension::ArgN4 == 4 , "" ); static_assert( std::is_same< typename a_int_r5::non_const_value_type , int >::value , "" ); } { using namespace Kokkos::Experimental::Impl ; - typedef ViewDataAnalysis< const int[] , typename ViewArrayAnalysis::non_const_value_type , void > a_const_int_r1 ; + typedef ViewDataAnalysis< const int[] , void > a_const_int_r1 ; static_assert( std::is_same< typename a_const_int_r1::specialize , void >::value , "" ); static_assert( std::is_same< typename a_const_int_r1::dimension , Kokkos::Experimental::Impl::ViewDimension<0> >::value , "" ); @@ -536,10 +572,12 @@ void test_view_mapping() static_assert( std::is_same< typename a_const_int_r1::non_const_type , int * >::value , "" ); static_assert( std::is_same< typename a_const_int_r1::non_const_value_type , int >::value , "" ); - typedef ViewDataAnalysis< const int**[4] , typename ViewArrayAnalysis< const int **[4] >::non_const_value_type , void > a_const_int_r3 ; + typedef ViewDataAnalysis< const int**[4] , void > a_const_int_r3 ; 
static_assert( std::is_same< typename a_const_int_r3::specialize , void >::value , "" ); + static_assert( std::is_same< typename a_const_int_r3::dimension , Kokkos::Experimental::Impl::ViewDimension<0,0,4> >::value , "" ); + static_assert( std::is_same< typename a_const_int_r3::type , const int**[4] >::value , "" ); static_assert( std::is_same< typename a_const_int_r3::value_type , const int >::value , "" ); static_assert( std::is_same< typename a_const_int_r3::array_scalar_type , const int**[4] >::value , "" ); @@ -564,8 +602,9 @@ void test_view_mapping() int data[N] ; - T vr1(data,N); - C cr1(vr1); + T vr1(data,N); // view of non-const + C cr1(vr1); // view of const from view of non-const + C cr2( (const int *) data , N ); // Generate static_assert error: // T tmp( cr1 ); @@ -761,42 +800,100 @@ void test_view_mapping() ASSERT_EQ( d.dimension_0() , 5 ); ASSERT_EQ( d.dimension_1() , 6 ); } + +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + /* Only works when experimental view is activated */ + { + typedef Kokkos::Experimental::View V ; + typedef Kokkos::Experimental::View U ; + + + V a("a",10); + + ASSERT_EQ( a.use_count() , 1 ); + + V b = a ; + + ASSERT_EQ( a.use_count() , 2 ); + ASSERT_EQ( b.use_count() , 2 ); + + { + U c = b ; // 'c' is compile-time unmanaged + + ASSERT_EQ( a.use_count() , 2 ); + ASSERT_EQ( b.use_count() , 2 ); + ASSERT_EQ( c.use_count() , 2 ); + + V d = c ; // 'd' is run-time unmanaged + + ASSERT_EQ( a.use_count() , 2 ); + ASSERT_EQ( b.use_count() , 2 ); + ASSERT_EQ( c.use_count() , 2 ); + ASSERT_EQ( d.use_count() , 2 ); + } + + ASSERT_EQ( a.use_count() , 2 ); + ASSERT_EQ( b.use_count() , 2 ); + + b = V(); + + ASSERT_EQ( a.use_count() , 1 ); + ASSERT_EQ( b.use_count() , 0 ); + + Kokkos::parallel_for( + Kokkos::RangePolicy< Kokkos::DefaultHostExecutionSpace >(0,10) , + [=]( int i ){ + // 'a' is captured by copy and the capture mechanism + // converts 'a' to an unmanaged copy. 
+ // When the parallel dispatch accepts a move for the lambda + // this count should become 1 + ASSERT_EQ( a.use_count() , 2 ); + V x = a ; + ASSERT_EQ( a.use_count() , 2 ); + ASSERT_EQ( x.use_count() , 2 ); + }); + } +#endif /* #if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) */ } template< class ExecSpace > struct TestViewMappingSubview { - constexpr static int AN = 10 ; + typedef Kokkos::pair range ; + + enum { AN = 10 }; typedef Kokkos::Experimental::View AT ; - typedef Kokkos::Experimental::Subview< AT , true > AS ; + typedef Kokkos::Experimental::View ACT ; + typedef Kokkos::Experimental::Subview< AT , range > AS ; - constexpr static int BN0 = 10 , BN1 = 11 , BN2 = 12 ; + enum { BN0 = 10 , BN1 = 11 , BN2 = 12 }; typedef Kokkos::Experimental::View BT ; - typedef Kokkos::Experimental::Subview< BT , true , true , true > BS ; + typedef Kokkos::Experimental::Subview< BT , range , range , range > BS ; - constexpr static int CN0 = 10 , CN1 = 11 , CN2 = 12 ; + enum { CN0 = 10 , CN1 = 11 , CN2 = 12 }; typedef Kokkos::Experimental::View CT ; - typedef Kokkos::Experimental::Subview< CT , true , true , true , false , false > CS ; + typedef Kokkos::Experimental::Subview< CT , range , range , range , int , int > CS ; - constexpr static int DN0 = 10 , DN1 = 11 , DN2 = 12 ; - typedef Kokkos::Experimental::View DT ; - typedef Kokkos::Experimental::Subview< DT , false , true , true , true , false > DS ; + enum { DN0 = 10 , DN1 = 11 , DN2 = 12 , DN3 = 13 , DN4 = 14 }; + typedef Kokkos::Experimental::View DT ; + typedef Kokkos::Experimental::Subview< DT , int , range , range , range , int > DS ; typedef Kokkos::Experimental::View DLT ; - typedef Kokkos::Experimental::Subview< DLT , true , false , false , false , false > DLS1 ; + typedef Kokkos::Experimental::Subview< DLT , range , int , int , int , int > DLS1 ; static_assert( DLS1::rank == 1 && std::is_same< typename DLS1::array_layout , Kokkos::LayoutLeft >::value , "Subview layout error for rank 1 subview of left-most range of 
LayoutLeft" ); typedef Kokkos::Experimental::View DRT ; - typedef Kokkos::Experimental::Subview< DRT , false , false , false , false , true > DRS1 ; + typedef Kokkos::Experimental::Subview< DRT , int , int , int , int , range > DRS1 ; static_assert( DRS1::rank == 1 && std::is_same< typename DRS1::array_layout , Kokkos::LayoutRight >::value , "Subview layout error for rank 1 subview of right-most range of LayoutRight" ); AT Aa ; AS Ab ; + ACT Ac ; BT Ba ; BS Bb ; CT Ca ; @@ -807,6 +904,7 @@ struct TestViewMappingSubview { TestViewMappingSubview() : Aa("Aa",AN) , Ab( Kokkos::Experimental::subview( Aa , std::pair(1,AN-1) ) ) + , Ac( Aa , std::pair(1,AN-1) ) , Ba("Ba",BN0,BN1,BN2) , Bb( Kokkos::Experimental::subview( Ba , std::pair(1,BN0-1) @@ -824,9 +922,9 @@ struct TestViewMappingSubview { , Da("Da",DN0,DN1,DN2) , Db( Kokkos::Experimental::subview( Da , 1 - , std::pair(1,DN0-1) , std::pair(1,DN1-1) , std::pair(1,DN2-1) + , std::pair(1,DN3-1) , 2 ) ) { @@ -836,7 +934,11 @@ struct TestViewMappingSubview { KOKKOS_INLINE_FUNCTION void operator()( const int , long & error_count ) const { + auto Ad = Kokkos::Experimental::subview< Kokkos::MemoryUnmanaged >( Aa , Kokkos::pair(1,AN-1) ); + for ( int i = 1 ; i < AN-1 ; ++i ) if( & Aa[i] != & Ab[i-1] ) ++error_count ; + for ( int i = 1 ; i < AN-1 ; ++i ) if( & Aa[i] != & Ac[i-1] ) ++error_count ; + for ( int i = 1 ; i < AN-1 ; ++i ) if( & Aa[i] != & Ad[i-1] ) ++error_count ; for ( int i2 = 1 ; i2 < BN2-1 ; ++i2 ) { for ( int i1 = 1 ; i1 < BN1-1 ; ++i1 ) { @@ -850,9 +952,9 @@ struct TestViewMappingSubview { if ( & Ca(i0,i1,i2,1,2) != & Cb(i0-1,i1-1,i2-1) ) ++error_count ; }}} - for ( int i2 = 1 ; i2 < DN2-1 ; ++i2 ) { - for ( int i1 = 1 ; i1 < DN1-1 ; ++i1 ) { - for ( int i0 = 1 ; i0 < DN0-1 ; ++i0 ) { + for ( int i2 = 1 ; i2 < DN3-1 ; ++i2 ) { + for ( int i1 = 1 ; i1 < DN2-1 ; ++i1 ) { + for ( int i0 = 1 ; i0 < DN1-1 ; ++i0 ) { if ( & Da(1,i0,i1,i2,2) != & Db(i0-1,i1-1,i2-1) ) ++error_count ; }}} } @@ -861,6 +963,35 @@ struct 
TestViewMappingSubview { { TestViewMappingSubview self ; + ASSERT_EQ( self.Aa.dimension_0() , AN ); + ASSERT_EQ( self.Ab.dimension_0() , AN - 2 ); + ASSERT_EQ( self.Ac.dimension_0() , AN - 2 ); + ASSERT_EQ( self.Ba.dimension_0() , BN0 ); + ASSERT_EQ( self.Ba.dimension_1() , BN1 ); + ASSERT_EQ( self.Ba.dimension_2() , BN2 ); + ASSERT_EQ( self.Bb.dimension_0() , BN0 - 2 ); + ASSERT_EQ( self.Bb.dimension_1() , BN1 - 2 ); + ASSERT_EQ( self.Bb.dimension_2() , BN2 - 2 ); + + ASSERT_EQ( self.Ca.dimension_0() , CN0 ); + ASSERT_EQ( self.Ca.dimension_1() , CN1 ); + ASSERT_EQ( self.Ca.dimension_2() , CN2 ); + ASSERT_EQ( self.Ca.dimension_3() , 13 ); + ASSERT_EQ( self.Ca.dimension_4() , 14 ); + ASSERT_EQ( self.Cb.dimension_0() , CN0 - 2 ); + ASSERT_EQ( self.Cb.dimension_1() , CN1 - 2 ); + ASSERT_EQ( self.Cb.dimension_2() , CN2 - 2 ); + + ASSERT_EQ( self.Da.dimension_0() , DN0 ); + ASSERT_EQ( self.Da.dimension_1() , DN1 ); + ASSERT_EQ( self.Da.dimension_2() , DN2 ); + ASSERT_EQ( self.Da.dimension_3() , DN3 ); + ASSERT_EQ( self.Da.dimension_4() , DN4 ); + + ASSERT_EQ( self.Db.dimension_0() , DN1 - 2 ); + ASSERT_EQ( self.Db.dimension_1() , DN2 - 2 ); + ASSERT_EQ( self.Db.dimension_2() , DN3 - 2 ); + ASSERT_EQ( self.Da.stride_1() , self.Db.stride_0() ); ASSERT_EQ( self.Da.stride_2() , self.Db.stride_1() ); ASSERT_EQ( self.Da.stride_3() , self.Db.stride_2() ); @@ -1073,6 +1204,51 @@ struct TestViewMappingAtomic { } }; +/*--------------------------------------------------------------------------*/ + +template< class ExecSpace > +struct TestViewMappingClassValue { + + struct ValueType { + KOKKOS_INLINE_FUNCTION + ValueType() + { +#if 0 +#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA ) + printf("TestViewMappingClassValue construct on Cuda\n"); +#elif defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) + printf("TestViewMappingClassValue construct on Host\n"); +#else + printf("TestViewMappingClassValue construct unknown\n"); +#endif +#endif + } + KOKKOS_INLINE_FUNCTION + 
~ValueType() + { +#if 0 +#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA ) + printf("TestViewMappingClassValue destruct on Cuda\n"); +#elif defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) + printf("TestViewMappingClassValue destruct on Host\n"); +#else + printf("TestViewMappingClassValue destruct unknown\n"); +#endif +#endif + } + }; + + static void run() + { + using namespace Kokkos::Experimental ; + ExecSpace::fence(); + { + View< ValueType , ExecSpace > a("a"); + ExecSpace::fence(); + } + ExecSpace::fence(); + } +}; } /* namespace Test */ diff --git a/lib/kokkos/core/unit_test/TestViewOfClass.hpp b/lib/kokkos/core/unit_test/TestViewOfClass.hpp index 09abacd80d..9fe3fabbd6 100644 --- a/lib/kokkos/core/unit_test/TestViewOfClass.hpp +++ b/lib/kokkos/core/unit_test/TestViewOfClass.hpp @@ -52,51 +52,86 @@ namespace Test { -namespace { -volatile int nested_view_count ; -} - template< class Space > -class NestedView { -private: +struct NestedView { + Kokkos::View member ; public: KOKKOS_INLINE_FUNCTION - NestedView() -#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) - : member("member",2) - { Kokkos::atomic_increment( & nested_view_count ); } -#else - : member(){} -#endif - - ~NestedView() -#if defined( KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST ) - { Kokkos::atomic_decrement( & nested_view_count ); } -#else + NestedView() : member() {} -#endif + KOKKOS_INLINE_FUNCTION + NestedView & operator = ( const Kokkos::View & lhs ) + { + member = lhs ; + if ( member.dimension_0() ) Kokkos::atomic_add( & member(0) , 1 ); + return *this ; + } + + KOKKOS_INLINE_FUNCTION + ~NestedView() + { + if ( member.dimension_0() ) { + Kokkos::atomic_add( & member(0) , -1 ); + } + } +}; + +template< class Space > +struct NestedViewFunctor { + + Kokkos::View< NestedView * , Space > nested ; + Kokkos::View array ; + + NestedViewFunctor( + const Kokkos::View< NestedView * , Space > & arg_nested , + const Kokkos::View & arg_array ) + : nested( arg_nested ) + , array( arg_array ) 
+ {} + + KOKKOS_INLINE_FUNCTION + void operator()( int i ) const + { nested[i] = array ; } }; template< class Space > void view_nested_view() { - ASSERT_EQ( 0 , nested_view_count ); + Kokkos::View tracking("tracking",1); + + typename Kokkos::View::HostMirror + host_tracking = Kokkos::create_mirror( tracking ); + { Kokkos::View< NestedView * , Space > a("a_nested_view",2); - ASSERT_EQ( 2 , nested_view_count ); + + Kokkos::parallel_for( Kokkos::RangePolicy(0,2) , NestedViewFunctor( a , tracking ) ); + Kokkos::deep_copy( host_tracking , tracking ); + ASSERT_EQ( 2 , host_tracking(0) ); + Kokkos::View< NestedView * , Space > b("b_nested_view",2); - ASSERT_EQ( 4 , nested_view_count ); + Kokkos::parallel_for( Kokkos::RangePolicy(0,2) , NestedViewFunctor( b , tracking ) ); + Kokkos::deep_copy( host_tracking , tracking ); + ASSERT_EQ( 4 , host_tracking(0) ); + } - // ASSERT_EQ( 0 , nested_view_count ); + Kokkos::deep_copy( host_tracking , tracking ); + +#if defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + ASSERT_EQ( 0 , host_tracking(0) ); +#endif + } } +#if ! 
defined( KOKKOS_USING_EXPERIMENTAL_VIEW ) + namespace Kokkos { namespace Impl { @@ -122,5 +157,7 @@ struct ViewDefaultConstruct< ExecSpace , Test::NestedView , true > } // namespace Impl } // namespace Kokkos +#endif + /*--------------------------------------------------------------------------*/ diff --git a/lib/kokkos/core/unit_test/TestViewSubview.hpp b/lib/kokkos/core/unit_test/TestViewSubview.hpp index e0c00d3a8b..39f286e531 100644 --- a/lib/kokkos/core/unit_test/TestViewSubview.hpp +++ b/lib/kokkos/core/unit_test/TestViewSubview.hpp @@ -488,6 +488,7 @@ void test_right_0() Kokkos::View x1 = Kokkos::subview( x_static_8, 0, 1, 2, 3, 0, 1, 2, Kokkos::pair(1,3) ); + ASSERT_TRUE( x1.dimension_0() == 2 ); ASSERT_TRUE( & x1(0) == & x_static_8(0,1,2,3,0,1,2,1) ); ASSERT_TRUE( & x1(1) == & x_static_8(0,1,2,3,0,1,2,2) ); @@ -495,6 +496,8 @@ void test_right_0() Kokkos::subview( x_static_8, 0, 1, 2, Kokkos::pair(1,3) , 0, 1, 2, Kokkos::pair(1,3) ); + ASSERT_TRUE( x2.dimension_0() == 2 ); + ASSERT_TRUE( x2.dimension_1() == 2 ); ASSERT_TRUE( & x2(0,0) == & x_static_8(0,1,2,1,0,1,2,1) ); ASSERT_TRUE( & x2(1,0) == & x_static_8(0,1,2,2,0,1,2,1) ); ASSERT_TRUE( & x2(0,1) == & x_static_8(0,1,2,1,0,1,2,2) ); @@ -505,6 +508,8 @@ void test_right_0() Kokkos::subview( x_static_8, 1, Kokkos::pair(0,2), 2, 3 , Kokkos::pair(0,2), 1, 2, 3 ); + ASSERT_TRUE( sx2.dimension_0() == 2 ); + ASSERT_TRUE( sx2.dimension_1() == 2 ); ASSERT_TRUE( & sx2(0,0) == & x_static_8(1,0,2,3,0,1,2,3) ); ASSERT_TRUE( & sx2(1,0) == & x_static_8(1,1,2,3,0,1,2,3) ); ASSERT_TRUE( & sx2(0,1) == & x_static_8(1,0,2,3,1,1,2,3) ); @@ -517,6 +522,10 @@ void test_right_0() , 2, Kokkos::pair(2,4) /* of [5] */ ); + ASSERT_TRUE( sx4.dimension_0() == 2 ); + ASSERT_TRUE( sx4.dimension_1() == 2 ); + ASSERT_TRUE( sx4.dimension_2() == 2 ); + ASSERT_TRUE( sx4.dimension_3() == 2 ); for ( int i0 = 0 ; i0 < (int) sx4.dimension_0() ; ++i0 ) for ( int i1 = 0 ; i1 < (int) sx4.dimension_1() ; ++i1 ) for ( int i2 = 0 ; i2 < (int) 
sx4.dimension_2() ; ++i2 ) diff --git a/lib/kokkos/doc/Doxyfile b/lib/kokkos/doc/Doxyfile new file mode 100644 index 0000000000..bc5c7486b2 --- /dev/null +++ b/lib/kokkos/doc/Doxyfile @@ -0,0 +1,127 @@ +# +# Include the global look and feel options +# +@INCLUDE = ../../common/Doxyfile +# +# Package options +# +PROJECT_NAME = "Kokkos Core Kernels Package" +PROJECT_NUMBER = "Version of the Day" +OUTPUT_DIRECTORY = . +OUTPUT_LANGUAGE = English + +EXTRACT_ALL = NO +EXTRACT_PRIVATE = NO +EXTRACT_STATIC = YES +HIDE_UNDOC_MEMBERS = YES +HIDE_UNDOC_CLASSES = YES +BRIEF_MEMBER_DESC = YES +REPEAT_BRIEF = YES +ALWAYS_DETAILED_SEC = YES +FULL_PATH_NAMES = NO +STRIP_FROM_PATH = +INTERNAL_DOCS = NO +CLASS_DIAGRAMS = YES +SOURCE_BROWSER = YES +INLINE_SOURCES = NO +STRIP_CODE_COMMENTS = YES +REFERENCED_BY_RELATION = NO +REFERENCES_RELATION = NO +CASE_SENSE_NAMES = YES +HIDE_SCOPE_NAMES = NO +VERBATIM_HEADERS = YES +SHOW_INCLUDE_FILES = YES +#JAVADOC_AUTOBRIEF = YES +INHERIT_DOCS = YES +INLINE_INHERITED_MEMB = YES +INLINE_INFO = YES +SORT_MEMBER_DOCS = NO +TAB_SIZE = 2 +ENABLED_SECTIONS = +SORT_BRIEF_DOCS = NO +GENERATE_TODOLIST = YES +GENERATE_TESTLIST = YES +QUIET = NO +WARNINGS = YES +WARN_IF_UNDOCUMENTED = YES +WARN_FORMAT = "$file:$line: $text" + +# +# INPUT: Where to find files that Doxygen should process. ../classic +# has a doc/ subdirectory with its own Doxyfile that points to its own +# files. The other Kokkos subpackages don't currently have their own +# Doxyfile files, so we have to do it manually here. +# +# mfh 26 Sep 2013: I've only added those directories in the Core +# subpackage that constitute the "public interface" of that +# subpackage. Please feel free to include additional subdirectories +# of ../core if you want to generate their documentation as well. +# +# mfh 26 Sep 2013: I've only added the Kokkos subpackages here that I +# think are ready for Doxygen documentation generation. Please feel +# free to amend this list as you see fit. 
+# + +INPUT = index.doc ../classic ../core/src ../containers/src ../linalg/src +FILE_PATTERNS = *.hpp *.cpp *.cuh *.cu +RECURSIVE = NO +EXCLUDE_PATTERNS = *.x *.o *.out +EXAMPLE_PATH = +EXAMPLE_RECURSIVE = YES +EXAMPLE_PATTERNS = *.cpp *.hpp +IMAGE_PATH = +INPUT_FILTER = +ALPHABETICAL_INDEX = YES +COLS_IN_ALPHA_INDEX = 4 +IGNORE_PREFIX = +# +# What diagrams are created +# +CLASS_GRAPH = YES +COLLABORATION_GRAPH = NO +INCLUDE_GRAPH = NO +INCLUDED_BY_GRAPH = NO +GRAPHICAL_HIERARCHY = YES +# +# Preprocessing +# +ENABLE_PREPROCESSING = YES +MACRO_EXPANSION = YES +EXPAND_ONLY_PREDEF = YES +SEARCH_INCLUDES = YES +INCLUDE_FILE_PATTERNS = +PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS DOXYGEN_USE_ONLY +INCLUDE_PATH = ../src +EXPAND_AS_DEFINED = +# +# Links to other packages +# +TAGFILES = ../../common/tag_files/teuchos.tag=../../../teuchos/doc/html ../../common/tag_files/epetra.tag=../../../epetra/doc/html \ + ../../common/tag_files/belos.tag=../../../belos/doc/html ../../common/tag_files/anasazi.tag=../../../anasazi/doc/html \ + ../../common/tag_files/kokkos.tag=../../../kokkos/doc/html +GENERATE_TAGFILE = ../../common/tag_files/tpetra.tag +ALLEXTERNALS = NO +EXTERNAL_GROUPS = NO +# +# Environment +# +PERL_PATH = /usr/bin/perl +HAVE_DOT = YES +DOT_PATH = +MAX_DOT_GRAPH_WIDTH = 1024 +MAX_DOT_GRAPH_HEIGHT = 1024 +# +# What kind of documentation is generated +# +#GENERATE_HTML = YES +#HTML_OUTPUT = html +#HTML_HEADER = includes/header.html +#HTML_FOOTER = includes/footer.html +#HTML_STYLESHEET = includes/stylesheet.css +#HTML_ALIGN_MEMBERS = YES +GENERATE_HTMLHELP = NO +DISABLE_INDEX = NO +GENERATE_LATEX = NO +GENERATE_RTF = NO +GENERATE_MAN = NO +GENERATE_XML = NO diff --git a/lib/kokkos/doc/Kokkos_PG.pdf b/lib/kokkos/doc/Kokkos_PG.pdf new file mode 100644 index 0000000000..3c415698c0 Binary files /dev/null and b/lib/kokkos/doc/Kokkos_PG.pdf differ diff --git a/lib/kokkos/doc/README b/lib/kokkos/doc/README new file mode 100644 index 0000000000..31e75f365c --- /dev/null +++ 
b/lib/kokkos/doc/README @@ -0,0 +1,32 @@ +Kokkos uses the Doxygen tool for providing three documentation +sources: +- man pages +- Latex User Guide +- HTML Online User Guide. + +Man Pages + +Man pages are available for all files and functions in the directory +TRILINOS_HOME/doc/kokkos/man, where TRILINOS_HOME is the location of your +copy of Trilinos. To use these pages with the Unix man utility, add +the directory to your man path as follows: + +setenv MANPATH `echo $MANPATH`:TRILINOS_HOME/doc/kokkos/man + + +LaTeX User Guide + +A postscript version of this guide is in +TRILINOS_HOME/doc/kokkos/latex/user_guide.ps. The LaTeX source is in the +directory TRILINOS_HOME/doc/kokkos/latex. + +HTML Online User Guide + +The online guide is initiated by pointing your browser to +TRILINOS_HOME/doc/kokkos/html/index.html + +Any question, comments or suggestions are welcome. Please send to +Mike Heroux at + +320-845-7695 +maherou@sandia.gov diff --git a/lib/kokkos/doc/build_docs b/lib/kokkos/doc/build_docs new file mode 100755 index 0000000000..da1d3e4f6e --- /dev/null +++ b/lib/kokkos/doc/build_docs @@ -0,0 +1,15 @@ +#!/bin/sh + +if [ $TRILINOS_HOME ]; then + echo "TRILINOS_HOME has already been set!" +else + echo "TRILINOS_HOME has not been set. Setting it!" + export TRILINOS_HOME=`pwd`/../../.. +fi + +echo +echo "Generating main Kokkos doxygen documentation ..." +echo + +doxygen Doxyfile + diff --git a/lib/kokkos/doc/index.doc b/lib/kokkos/doc/index.doc new file mode 100644 index 0000000000..27a9e4f2e7 --- /dev/null +++ b/lib/kokkos/doc/index.doc @@ -0,0 +1,72 @@ +/*! +\mainpage Trilinos/Kokkos: Shared-memory programming interface and computational kernels + +\section Kokkos_Intro Introduction + +The %Kokkos package has two main components. The first, sometimes +called "%Kokkos Array" or just "%Kokkos," implements a +performance-portable shared-memory parallel programming model and data +containers. 
The second, called "%Kokkos Classic," consists of +computational kernels that support the %Tpetra package. + +\section Kokkos_Kokkos The %Kokkos programming model + +%Kokkos implements a performance-portable shared-memory parallel +programming model and data containers. It lets you write an algorithm +once, and just change a template parameter to get the optimal data +layout for your hardware. %Kokkos has back-ends for the following +parallel programming models: + +- Kokkos::Threads: POSIX Threads (Pthreads) +- Kokkos::OpenMP: OpenMP +- Kokkos::Cuda: NVIDIA's CUDA programming model for graphics + processing units (GPUs) +- Kokkos::Serial: No thread parallelism + +%Kokkos also has optimizations for shared-memory parallel systems with +nonuniform memory access (NUMA). Its containers can hold data of any +primitive ("plain old") data type (and some aggregate types). %Kokkos +Array may be used as a stand-alone programming model. + +%Kokkos' parallel operations include the following: + +- parallel_for: a thread-parallel "for loop" +- parallel_reduce: a thread-parallel reduction +- parallel_scan: a thread-parallel prefix scan operation + +as well as expert-level platform-independent interfaces to thread +"teams," per-team "shared memory," synchronization, and atomic update +operations. + +%Kokkos' data containers include the following: + +- Kokkos::View: A multidimensional array suitable for thread-parallel + operations. Its layout (e.g., row-major or column-major) is + optimized by default for the particular thread-parallel device. +- Kokkos::Vector: A drop-in replacement for std::vector that eases + porting from standard sequential C++ data structures to %Kokkos' + parallel data structures. +- Kokkos::UnorderedMap: A parallel lookup table comparable in + functionality to std::unordered_map. + +%Kokkos also uses the above basic containers to implement higher-level +data structures, like sparse graphs and matrices. 
+ +A good place to start learning about %Kokkos would be these tutorial slides from the 2013 Trilinos Users' Group meeting. + +\section Kokkos_Classic %Kokkos Classic + +"%Kokkos Classic" consists of computational kernels that support the +%Tpetra package. These kernels include sparse matrix-vector multiply, +sparse triangular solve, Gauss-Seidel, and dense vector operations. +They are templated on the type of objects (\c Scalar) on which they +operate. This component was not meant to be visible to users; it is +an implementation detail of the %Tpetra distributed linear algebra +package. + +%Kokkos Classic also implements a shared-memory parallel programming +model. This inspired and preceded the %Kokkos programming model +described in the previous section. Users should consider the %Kokkos +Classic programming model deprecated, and prefer the new %Kokkos +programming model. +*/ diff --git a/lib/kokkos/example/CMakeLists.txt b/lib/kokkos/example/CMakeLists.txt new file mode 100644 index 0000000000..3809cc2ea5 --- /dev/null +++ b/lib/kokkos/example/CMakeLists.txt @@ -0,0 +1,20 @@ + + +# Subpackage name must match what appears in kokkos/cmake/Dependencies.cmake +# +TRIBITS_SUBPACKAGE(Example) + +TRIBITS_ADD_EXAMPLE_DIRECTORIES(query_device) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(fixture) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(feint) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(fenl) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(multi_fem) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(md_skeleton) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(global_2_local_ids) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(grow_array) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(sort_array) +if(NOT Kokkos_ENABLE_Cuda) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(tutorial) +endif() +TRIBITS_SUBPACKAGE_POSTPROCESS() + diff --git a/lib/kokkos/example/cmake/Dependencies.cmake b/lib/kokkos/example/cmake/Dependencies.cmake new file mode 100644 index 0000000000..0d86e78712 --- /dev/null +++ b/lib/kokkos/example/cmake/Dependencies.cmake @@ -0,0 +1,4 @@ +TRIBITS_PACKAGE_DEFINE_DEPENDENCIES( 
+ LIB_REQUIRED_DEP_PACKAGES KokkosCore KokkosContainers KokkosAlgorithms + TEST_OPTIONAL_DEP_TPLS CUSPARSE MKL + ) diff --git a/lib/kokkos/example/feint/CMakeLists.txt b/lib/kokkos/example/feint/CMakeLists.txt new file mode 100644 index 0000000000..0018b9f9f5 --- /dev/null +++ b/lib/kokkos/example/feint/CMakeLists.txt @@ -0,0 +1,18 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../common) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../fixture) + +SET(SOURCES "") + +FILE(GLOB SOURCES *.cpp) + +LIST( APPEND SOURCES ../fixture/BoxElemPart.cpp) + +TRIBITS_ADD_EXECUTABLE( + feint + SOURCES ${SOURCES} + COMM serial mpi + ) + diff --git a/lib/kokkos/example/fenl/CMakeLists.txt b/lib/kokkos/example/fenl/CMakeLists.txt new file mode 100644 index 0000000000..150656b16e --- /dev/null +++ b/lib/kokkos/example/fenl/CMakeLists.txt @@ -0,0 +1,17 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../common) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../fixture) + +SET(SOURCES "") + +FILE( GLOB SOURCES *.cpp ) + +LIST( APPEND SOURCES ../fixture/BoxElemPart.cpp ) + +TRIBITS_ADD_EXECUTABLE( + fenl + SOURCES ${SOURCES} + COMM serial mpi + ) diff --git a/lib/kokkos/example/fenl/Makefile b/lib/kokkos/example/fenl/Makefile index 491ed4ee6d..2e64e11e1e 100644 --- a/lib/kokkos/example/fenl/Makefile +++ b/lib/kokkos/example/fenl/Makefile @@ -1,15 +1,20 @@ -KOKKOS_PATH = ../.. +KOKKOS_PATH ?= ../.. 
-vpath %.cpp ${KOKKOS_PATH}/example/fixture ${KOKKOS_PATH}/example/fenl +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) -EXAMPLE_HEADERS = $(wildcard $(KOKKOS_PATH)/example/common/*.hpp ${KOKKOS_PATH}/example/fixture/*.hpp ${KOKKOS_PATH}/example/fenl/*.hpp) +vpath %.cpp ${SRC_DIR}/../fixture ${SRC_DIR} + +EXAMPLE_HEADERS = $(wildcard $(SRC_DIR)/../common/*.hpp ${SRC_DIR}/../fixture/*.hpp ${SRC_DIR}/*.hpp) default: build_all echo "End Build" - + include $(KOKKOS_PATH)/Makefile.kokkos -ifeq ($(KOKKOS_INTERNAL_USE_CUDA), 1) +# KOKKOS_INTERNAL_USE_CUDA is not exported to installed Makefile.kokkos +# use KOKKOS_DEVICE here +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) CXX = nvcc_wrapper CXXFLAGS ?= -O3 LINK = $(CXX) @@ -22,9 +27,9 @@ else endif KOKKOS_CXXFLAGS += \ - -I${KOKKOS_PATH}/example/common \ - -I${KOKKOS_PATH}/example/fixture \ - -I${KOKKOS_PATH}/example/fenl + -I${SRC_DIR}/../common \ + -I${SRC_DIR}/../fixture \ + -I${SRC_DIR} EXE_EXAMPLE_FENL = KokkosExample_Fenl @@ -42,6 +47,8 @@ build_all : $(TARGETS) test : build_all +clean: + rm -f *.o $(EXE_EXAMPLE_FENL) KokkosCore_config.* # Compilation rules diff --git a/lib/kokkos/example/fixture/CMakeLists.txt b/lib/kokkos/example/fixture/CMakeLists.txt new file mode 100644 index 0000000000..298c54c5bb --- /dev/null +++ b/lib/kokkos/example/fixture/CMakeLists.txt @@ -0,0 +1,13 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../common) + +SET(SOURCES_TEST Main.cpp TestFixture.cpp BoxElemPart.cpp ) + +# Automatically picks up 'kokkosexample_fixture' +TRIBITS_ADD_EXECUTABLE_AND_TEST( + TestFixture + SOURCES ${SOURCES_TEST} + ) + diff --git a/lib/kokkos/example/global_2_local_ids/CMakeLists.txt b/lib/kokkos/example/global_2_local_ids/CMakeLists.txt new file mode 100644 index 0000000000..9f32fe5802 --- /dev/null +++ b/lib/kokkos/example/global_2_local_ids/CMakeLists.txt 
@@ -0,0 +1,17 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES "") + +SET(SOURCES + G2L_Main.cpp + ) + +TRIBITS_ADD_EXECUTABLE( + global_2_local_ids + SOURCES ${SOURCES} + COMM serial mpi + ) + + diff --git a/lib/kokkos/example/global_2_local_ids/Makefile b/lib/kokkos/example/global_2_local_ids/Makefile new file mode 100644 index 0000000000..bf8fbea3e0 --- /dev/null +++ b/lib/kokkos/example/global_2_local_ids/Makefile @@ -0,0 +1,53 @@ +KOKKOS_PATH ?= ../.. + +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) + +SRC = $(wildcard $(SRC_DIR)/*.cpp) +OBJ = $(SRC:$(SRC_DIR)/%.cpp=%.o) + +#SRC = $(wildcard *.cpp) +#OBJ = $(SRC:%.cpp=%.o) + +default: build + echo "Start Build" + +# use installed Makefile.kokkos +include $(KOKKOS_PATH)/Makefile.kokkos + +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) +CXX = $(NVCC_WRAPPER) +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .cuda, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "Cuda,OpenMP" +#KOKKOS_ARCH = "SNB,Kepler35" +else +CXX = g++ +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .host, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "OpenMP" +#KOKKOS_ARCH = "SNB" +endif + +DEPFLAGS = -M + +LIB = + + +build: $(EXE) + +$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE) + +clean: + rm -f *.a *.o *.cuda *.host + +# Compilation rules + +%.o:$(SRC_DIR)/%.cpp $(KOKKOS_CPP_DEPENDS) + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< + diff --git a/lib/kokkos/example/grow_array/CMakeLists.txt b/lib/kokkos/example/grow_array/CMakeLists.txt new file mode 100644 index 0000000000..d9ff170492 --- /dev/null +++ b/lib/kokkos/example/grow_array/CMakeLists.txt @@ -0,0 +1,14 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) 
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES "") + +FILE(GLOB SOURCES *.cpp) + +TRIBITS_ADD_EXECUTABLE( + grow_array + SOURCES ${SOURCES} + COMM serial mpi + ) + diff --git a/lib/kokkos/example/grow_array/Makefile b/lib/kokkos/example/grow_array/Makefile new file mode 100644 index 0000000000..bf8fbea3e0 --- /dev/null +++ b/lib/kokkos/example/grow_array/Makefile @@ -0,0 +1,53 @@ +KOKKOS_PATH ?= ../.. + +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) + +SRC = $(wildcard $(SRC_DIR)/*.cpp) +OBJ = $(SRC:$(SRC_DIR)/%.cpp=%.o) + +#SRC = $(wildcard *.cpp) +#OBJ = $(SRC:%.cpp=%.o) + +default: build + echo "Start Build" + +# use installed Makefile.kokkos +include $(KOKKOS_PATH)/Makefile.kokkos + +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) +CXX = $(NVCC_WRAPPER) +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .cuda, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "Cuda,OpenMP" +#KOKKOS_ARCH = "SNB,Kepler35" +else +CXX = g++ +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .host, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "OpenMP" +#KOKKOS_ARCH = "SNB" +endif + +DEPFLAGS = -M + +LIB = + + +build: $(EXE) + +$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE) + +clean: + rm -f *.a *.o *.cuda *.host + +# Compilation rules + +%.o:$(SRC_DIR)/%.cpp $(KOKKOS_CPP_DEPENDS) + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< + diff --git a/lib/kokkos/example/md_skeleton/CMakeLists.txt b/lib/kokkos/example/md_skeleton/CMakeLists.txt new file mode 100644 index 0000000000..28412c3784 --- /dev/null +++ b/lib/kokkos/example/md_skeleton/CMakeLists.txt @@ -0,0 +1,16 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES "") +SET(LIBRARIES "") + +FILE(GLOB SOURCES *.cpp ) + +TRIBITS_ADD_EXECUTABLE( 
+ md_skeleton + SOURCES ${SOURCES} + COMM serial mpi + DEPLIBS ${LIBRARIES} + ) + diff --git a/lib/kokkos/example/md_skeleton/Makefile b/lib/kokkos/example/md_skeleton/Makefile new file mode 100644 index 0000000000..bf8fbea3e0 --- /dev/null +++ b/lib/kokkos/example/md_skeleton/Makefile @@ -0,0 +1,53 @@ +KOKKOS_PATH ?= ../.. + +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) + +SRC = $(wildcard $(SRC_DIR)/*.cpp) +OBJ = $(SRC:$(SRC_DIR)/%.cpp=%.o) + +#SRC = $(wildcard *.cpp) +#OBJ = $(SRC:%.cpp=%.o) + +default: build + echo "Start Build" + +# use installed Makefile.kokkos +include $(KOKKOS_PATH)/Makefile.kokkos + +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) +CXX = $(NVCC_WRAPPER) +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .cuda, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "Cuda,OpenMP" +#KOKKOS_ARCH = "SNB,Kepler35" +else +CXX = g++ +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .host, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "OpenMP" +#KOKKOS_ARCH = "SNB" +endif + +DEPFLAGS = -M + +LIB = + + +build: $(EXE) + +$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE) + +clean: + rm -f *.a *.o *.cuda *.host + +# Compilation rules + +%.o:$(SRC_DIR)/%.cpp $(KOKKOS_CPP_DEPENDS) + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< + diff --git a/lib/kokkos/example/multi_fem/CMakeLists.txt b/lib/kokkos/example/multi_fem/CMakeLists.txt new file mode 100644 index 0000000000..e3a40bc26f --- /dev/null +++ b/lib/kokkos/example/multi_fem/CMakeLists.txt @@ -0,0 +1,16 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES "") + +FILE(GLOB SOURCES *.cpp) + +SET(LIBRARIES kokkoscore) + +TRIBITS_ADD_EXECUTABLE( + multi_fem + SOURCES ${SOURCES} + COMM serial mpi + ) + diff --git 
a/lib/kokkos/example/multi_fem/Makefile b/lib/kokkos/example/multi_fem/Makefile new file mode 100644 index 0000000000..72e1768fcb --- /dev/null +++ b/lib/kokkos/example/multi_fem/Makefile @@ -0,0 +1,53 @@ +KOKKOS_PATH ?= ../.. + +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) + +SRC = $(wildcard $(SRC_DIR)/*.cpp) +OBJ = $(SRC:$(SRC_DIR)/%.cpp=%.o) + +#SRC = $(wildcard *.cpp) +#OBJ = $(SRC:%.cpp=%.o) + +default: build + echo "Start Build" + +# use installed Makefile.kokkos +include $(KOKKOS_PATH)/Makefile.kokkos + +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) +CXX = $(NVCC_WRAPPER) +CXXFLAGS = -I$(SRC_DIR) -I$(CUDA_PATH) -O3 +LINK = $(CXX) +LINKFLAGS = -L$(CUDA_PATH)/lib64 -lcusparse +EXE = $(addsuffix .cuda, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "Cuda,OpenMP" +#KOKKOS_ARCH = "SNB,Kepler35" +else +CXX = g++ +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .host, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "OpenMP" +#KOKKOS_ARCH = "SNB" +endif + +DEPFLAGS = -M + +LIB = + + +build: $(EXE) + +$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE) + +clean: + rm -f *.a *.o *.cuda *.host + +# Compilation rules + +%.o:$(SRC_DIR)/%.cpp $(KOKKOS_CPP_DEPENDS) + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< + diff --git a/lib/kokkos/example/query_device/CMakeLists.txt b/lib/kokkos/example/query_device/CMakeLists.txt new file mode 100644 index 0000000000..dade7f01fe --- /dev/null +++ b/lib/kokkos/example/query_device/CMakeLists.txt @@ -0,0 +1,14 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES "") + +FILE(GLOB SOURCES *.cpp) + +TRIBITS_ADD_EXECUTABLE( + query_device + SOURCES ${SOURCES} + COMM serial mpi + ) + diff --git a/lib/kokkos/example/query_device/Makefile b/lib/kokkos/example/query_device/Makefile new file 
mode 100644 index 0000000000..bf8fbea3e0 --- /dev/null +++ b/lib/kokkos/example/query_device/Makefile @@ -0,0 +1,53 @@ +KOKKOS_PATH ?= ../.. + +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) + +SRC = $(wildcard $(SRC_DIR)/*.cpp) +OBJ = $(SRC:$(SRC_DIR)/%.cpp=%.o) + +#SRC = $(wildcard *.cpp) +#OBJ = $(SRC:%.cpp=%.o) + +default: build + echo "Start Build" + +# use installed Makefile.kokkos +include $(KOKKOS_PATH)/Makefile.kokkos + +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) +CXX = $(NVCC_WRAPPER) +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .cuda, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "Cuda,OpenMP" +#KOKKOS_ARCH = "SNB,Kepler35" +else +CXX = g++ +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .host, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "OpenMP" +#KOKKOS_ARCH = "SNB" +endif + +DEPFLAGS = -M + +LIB = + + +build: $(EXE) + +$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE) + +clean: + rm -f *.a *.o *.cuda *.host + +# Compilation rules + +%.o:$(SRC_DIR)/%.cpp $(KOKKOS_CPP_DEPENDS) + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< + diff --git a/lib/kokkos/example/sort_array/CMakeLists.txt b/lib/kokkos/example/sort_array/CMakeLists.txt new file mode 100644 index 0000000000..3e58198d7b --- /dev/null +++ b/lib/kokkos/example/sort_array/CMakeLists.txt @@ -0,0 +1,15 @@ +INCLUDE(TribitsAddExecutableAndTest) + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +SET(SOURCES "") + +FILE(GLOB SOURCES *.cpp) + +TRIBITS_ADD_EXECUTABLE( + sort_array + SOURCES ${SOURCES} + COMM serial mpi + ) + diff --git a/lib/kokkos/example/sort_array/Makefile b/lib/kokkos/example/sort_array/Makefile new file mode 100644 index 0000000000..bf8fbea3e0 --- /dev/null +++ b/lib/kokkos/example/sort_array/Makefile @@ 
-0,0 +1,53 @@ +KOKKOS_PATH ?= ../.. + +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +SRC_DIR := $(dir $(MAKEFILE_PATH)) + +SRC = $(wildcard $(SRC_DIR)/*.cpp) +OBJ = $(SRC:$(SRC_DIR)/%.cpp=%.o) + +#SRC = $(wildcard *.cpp) +#OBJ = $(SRC:%.cpp=%.o) + +default: build + echo "Start Build" + +# use installed Makefile.kokkos +include $(KOKKOS_PATH)/Makefile.kokkos + +ifneq (,$(findstring Cuda,$(KOKKOS_DEVICES))) +CXX = $(NVCC_WRAPPER) +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .cuda, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "Cuda,OpenMP" +#KOKKOS_ARCH = "SNB,Kepler35" +else +CXX = g++ +CXXFLAGS = -I$(SRC_DIR) -O3 +LINK = $(CXX) +LINKFLAGS = +EXE = $(addsuffix .host, $(shell basename $(SRC_DIR))) +#KOKKOS_DEVICES = "OpenMP" +#KOKKOS_ARCH = "SNB" +endif + +DEPFLAGS = -M + +LIB = + + +build: $(EXE) + +$(EXE): $(OBJ) $(KOKKOS_LINK_DEPENDS) + $(LINK) $(KOKKOS_LDFLAGS) $(LINKFLAGS) $(EXTRA_PATH) $(OBJ) $(KOKKOS_LIBS) $(LIB) -o $(EXE) + +clean: + rm -f *.a *.o *.cuda *.host + +# Compilation rules + +%.o:$(SRC_DIR)/%.cpp $(KOKKOS_CPP_DEPENDS) + $(CXX) $(KOKKOS_CPPFLAGS) $(KOKKOS_CXXFLAGS) $(CXXFLAGS) $(EXTRA_INC) -c $< + diff --git a/lib/kokkos/example/tutorial/01_hello_world/CMakeLists.txt b/lib/kokkos/example/tutorial/01_hello_world/CMakeLists.txt new file mode 100644 index 0000000000..5e5b1fcb46 --- /dev/null +++ b/lib/kokkos/example/tutorial/01_hello_world/CMakeLists.txt @@ -0,0 +1,11 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. 
+TRIBITS_ADD_EXECUTABLE( + tutorial_01_hello_world + SOURCES hello_world.cpp + COMM serial mpi + ) + diff --git a/lib/kokkos/example/tutorial/01_hello_world_lambda/CMakeLists.txt b/lib/kokkos/example/tutorial/01_hello_world_lambda/CMakeLists.txt new file mode 100644 index 0000000000..3fcca4bceb --- /dev/null +++ b/lib/kokkos/example/tutorial/01_hello_world_lambda/CMakeLists.txt @@ -0,0 +1,13 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +IF (Kokkos_ENABLE_CXX11) + # This is a tutorial, not a test, so we don't ask CTest to run it. + TRIBITS_ADD_EXECUTABLE( + tutorial_01_hello_world_lambda + SOURCES hello_world_lambda.cpp + COMM serial mpi + ) +ENDIF () + diff --git a/lib/kokkos/example/tutorial/02_simple_reduce/CMakeLists.txt b/lib/kokkos/example/tutorial/02_simple_reduce/CMakeLists.txt new file mode 100644 index 0000000000..7c78db840f --- /dev/null +++ b/lib/kokkos/example/tutorial/02_simple_reduce/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_02_simple_reduce + SOURCES simple_reduce.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/02_simple_reduce_lambda/CMakeLists.txt b/lib/kokkos/example/tutorial/02_simple_reduce_lambda/CMakeLists.txt new file mode 100644 index 0000000000..e2e3a929f1 --- /dev/null +++ b/lib/kokkos/example/tutorial/02_simple_reduce_lambda/CMakeLists.txt @@ -0,0 +1,12 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +IF (Kokkos_ENABLE_CXX11) + # This is a tutorial, not a test, so we don't ask CTest to run it. 
+ TRIBITS_ADD_EXECUTABLE( + tutorial_02_simple_reduce_lambda + SOURCES simple_reduce_lambda.cpp + COMM serial mpi + ) +ENDIF () diff --git a/lib/kokkos/example/tutorial/03_simple_view/CMakeLists.txt b/lib/kokkos/example/tutorial/03_simple_view/CMakeLists.txt new file mode 100644 index 0000000000..7475a99e49 --- /dev/null +++ b/lib/kokkos/example/tutorial/03_simple_view/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_03_simple_view + SOURCES simple_view.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/03_simple_view_lambda/CMakeLists.txt b/lib/kokkos/example/tutorial/03_simple_view_lambda/CMakeLists.txt new file mode 100644 index 0000000000..601fe452a4 --- /dev/null +++ b/lib/kokkos/example/tutorial/03_simple_view_lambda/CMakeLists.txt @@ -0,0 +1,12 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +IF (Kokkos_ENABLE_CXX11) + # This is a tutorial, not a test, so we don't ask CTest to run it. + TRIBITS_ADD_EXECUTABLE( + tutorial_03_simple_view_lambda + SOURCES simple_view_lambda.cpp + COMM serial mpi + ) +ENDIF () diff --git a/lib/kokkos/example/tutorial/04_simple_memoryspaces/CMakeLists.txt b/lib/kokkos/example/tutorial/04_simple_memoryspaces/CMakeLists.txt new file mode 100644 index 0000000000..09f209077a --- /dev/null +++ b/lib/kokkos/example/tutorial/04_simple_memoryspaces/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. 
+TRIBITS_ADD_EXECUTABLE( + tutorial_04_simple_memoryspaces + SOURCES simple_memoryspaces.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/05_simple_atomics/CMakeLists.txt b/lib/kokkos/example/tutorial/05_simple_atomics/CMakeLists.txt new file mode 100644 index 0000000000..5a5790fb04 --- /dev/null +++ b/lib/kokkos/example/tutorial/05_simple_atomics/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_05_simple_atomics + SOURCES simple_atomics.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Advanced_Views/01_data_layouts/CMakeLists.txt b/lib/kokkos/example/tutorial/Advanced_Views/01_data_layouts/CMakeLists.txt new file mode 100644 index 0000000000..2eb3a8f6c9 --- /dev/null +++ b/lib/kokkos/example/tutorial/Advanced_Views/01_data_layouts/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_advancedviews_01_data_layouts + SOURCES data_layouts.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Advanced_Views/02_memory_traits/CMakeLists.txt b/lib/kokkos/example/tutorial/Advanced_Views/02_memory_traits/CMakeLists.txt new file mode 100644 index 0000000000..1963e544d7 --- /dev/null +++ b/lib/kokkos/example/tutorial/Advanced_Views/02_memory_traits/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. 
+TRIBITS_ADD_EXECUTABLE( + tutorial_advancedviews_02_memory_traits + SOURCES memory_traits.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Advanced_Views/03_subviews/CMakeLists.txt b/lib/kokkos/example/tutorial/Advanced_Views/03_subviews/CMakeLists.txt new file mode 100644 index 0000000000..cbe394c78b --- /dev/null +++ b/lib/kokkos/example/tutorial/Advanced_Views/03_subviews/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_advancedviews_03_subviews + SOURCES subviews.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Advanced_Views/04_dualviews/CMakeLists.txt b/lib/kokkos/example/tutorial/Advanced_Views/04_dualviews/CMakeLists.txt new file mode 100644 index 0000000000..300dab128e --- /dev/null +++ b/lib/kokkos/example/tutorial/Advanced_Views/04_dualviews/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_advancedviews_04_dualviews + SOURCES dual_view.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Advanced_Views/05_NVIDIA_UVM/CMakeLists.txt b/lib/kokkos/example/tutorial/Advanced_Views/05_NVIDIA_UVM/CMakeLists.txt new file mode 100644 index 0000000000..f0ed569f9f --- /dev/null +++ b/lib/kokkos/example/tutorial/Advanced_Views/05_NVIDIA_UVM/CMakeLists.txt @@ -0,0 +1,13 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +IF (Kokkos_ENABLE_Cuda_UVM) + # This is a tutorial, not a test, so we don't ask CTest to run it. 
+ TRIBITS_ADD_EXECUTABLE( + tutorial_advancedviews_05_nvidia_uvm + SOURCES uvm_example.cpp + COMM serial mpi + DEPLIBS kokkoscontainers kokkoscore + ) +ENDIF () diff --git a/lib/kokkos/example/tutorial/Advanced_Views/CMakeLists.txt b/lib/kokkos/example/tutorial/Advanced_Views/CMakeLists.txt new file mode 100644 index 0000000000..f4f1addc55 --- /dev/null +++ b/lib/kokkos/example/tutorial/Advanced_Views/CMakeLists.txt @@ -0,0 +1,9 @@ + +TRIBITS_ADD_EXAMPLE_DIRECTORIES(01_data_layouts) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(02_memory_traits) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(03_subviews) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(04_dualviews) + +IF (Kokkos_ENABLE_Cuda_UVM) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(05_NVIDIA_UVM) +ENDIF () diff --git a/lib/kokkos/example/tutorial/CMakeLists.txt b/lib/kokkos/example/tutorial/CMakeLists.txt new file mode 100644 index 0000000000..d1fd4c0ae9 --- /dev/null +++ b/lib/kokkos/example/tutorial/CMakeLists.txt @@ -0,0 +1,17 @@ + +TRIBITS_ADD_EXAMPLE_DIRECTORIES(01_hello_world) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(02_simple_reduce) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(03_simple_view) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(04_simple_memoryspaces) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(05_simple_atomics) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(Advanced_Views) +TRIBITS_ADD_EXAMPLE_DIRECTORIES(Hierarchical_Parallelism) + +IF (Kokkos_ENABLE_CXX11) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(01_hello_world_lambda) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(02_simple_reduce_lambda) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(03_simple_view_lambda) +ENDIF () + + + diff --git a/lib/kokkos/example/tutorial/Hierarchical_Parallelism/01_thread_teams/CMakeLists.txt b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/01_thread_teams/CMakeLists.txt new file mode 100644 index 0000000000..2d8a514a45 --- /dev/null +++ b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/01_thread_teams/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# 
This is a tutorial, not a test, so we don't ask CTest to run it. +TRIBITS_ADD_EXECUTABLE( + tutorial_hierarchicalparallelism_01_thread_teams + SOURCES thread_teams.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Hierarchical_Parallelism/01_thread_teams_lambda/CMakeLists.txt b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/01_thread_teams_lambda/CMakeLists.txt new file mode 100644 index 0000000000..ec7f1e1159 --- /dev/null +++ b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/01_thread_teams_lambda/CMakeLists.txt @@ -0,0 +1,13 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +IF (Kokkos_ENABLE_CXX11) + # This is a tutorial, not a test, so we don't ask CTest to run it. + TRIBITS_ADD_EXECUTABLE( + tutorial_hierarchical_01_thread_teams_lambda + SOURCES thread_teams_lambda.cpp + COMM serial mpi + ) +ENDIF () + diff --git a/lib/kokkos/example/tutorial/Hierarchical_Parallelism/02_nested_parallel_for/CMakeLists.txt b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/02_nested_parallel_for/CMakeLists.txt new file mode 100644 index 0000000000..e660405345 --- /dev/null +++ b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/02_nested_parallel_for/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. 
+TRIBITS_ADD_EXECUTABLE( + tutorial_hierarchicalparallelism_02_nested_parallel_for + SOURCES nested_parallel_for.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Hierarchical_Parallelism/03_vectorization/CMakeLists.txt b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/03_vectorization/CMakeLists.txt new file mode 100644 index 0000000000..ea6b0b1e42 --- /dev/null +++ b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/03_vectorization/CMakeLists.txt @@ -0,0 +1,16 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. + +IF(Kokkos_ENABLE_CXX11) + +TRIBITS_ADD_EXECUTABLE( + tutorial_hierarchicalparallelism_03_vectorization + SOURCES vectorization.cpp + COMM serial mpi + ) + +ENDIF() + diff --git a/lib/kokkos/example/tutorial/Hierarchical_Parallelism/04_team_scan/CMakeLists.txt b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/04_team_scan/CMakeLists.txt new file mode 100644 index 0000000000..15ad5d7803 --- /dev/null +++ b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/04_team_scan/CMakeLists.txt @@ -0,0 +1,10 @@ + +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}) +INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) + +# This is a tutorial, not a test, so we don't ask CTest to run it. 
+TRIBITS_ADD_EXECUTABLE( + tutorial_hierarchicalparallelism_04_team_scan + SOURCES team_scan.cpp + COMM serial mpi + ) diff --git a/lib/kokkos/example/tutorial/Hierarchical_Parallelism/CMakeLists.txt b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/CMakeLists.txt new file mode 100644 index 0000000000..e03d7aeb90 --- /dev/null +++ b/lib/kokkos/example/tutorial/Hierarchical_Parallelism/CMakeLists.txt @@ -0,0 +1,8 @@ + +TRIBITS_ADD_EXAMPLE_DIRECTORIES(01_thread_teams) + +IF (Kokkos_ENABLE_CXX11) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(01_thread_teams_lambda) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(02_nested_parallel_for) + TRIBITS_ADD_EXAMPLE_DIRECTORIES(03_vectorization) +ENDIF () diff --git a/lib/kokkos/generate_makefile.bash b/lib/kokkos/generate_makefile.bash index e9e103e74d..f60bc7be8b 100755 --- a/lib/kokkos/generate_makefile.bash +++ b/lib/kokkos/generate_makefile.bash @@ -60,6 +60,9 @@ case $key in --compiler*) COMPILER="${key#*=}" ;; + --with-options*) + KOKKOS_OPT="${key#*=}" + ;; --help) echo "Kokkos configure options:" echo "--kokkos-path=/Path/To/Kokkos: Path to the Kokkos root directory" @@ -91,6 +94,8 @@ case $key in echo " KOKKOS_LDFLAGS (such as -fopenmp, -lpthread, etc.)" echo "--with-gtest=/Path/To/Gtest: set path to gtest (used in unit and performance tests" echo "--with-hwloc=/Path/To/Hwloc: set path to hwloc" + echo "--with-options=[OPTIONS]: additional options to Kokkos:" + echo " aggressive_vectorization = add ivdep on loops" exit 0 ;; *) @@ -147,6 +152,9 @@ fi if [ ${#QTHREAD_PATH} -gt 0 ]; then KOKKOS_OPTIONS="${KOKKOS_OPTIONS} QTHREAD_PATH=${QTHREAD_PATH}" fi +if [ ${#KOKKOS_OPT} -gt 0 ]; then +KOKKOS_OPTIONS="${KOKKOS_OPTIONS} KOKKOS_OPTIONS=${KOKKOS_OPT}" +fi mkdir core mkdir core/unit_test mkdir core/perf_test diff --git a/lib/kokkos/tpls/gtest/gtest/LICENSE b/lib/kokkos/tpls/gtest/gtest/LICENSE new file mode 100644 index 0000000000..1941a11f8c --- /dev/null +++ b/lib/kokkos/tpls/gtest/gtest/LICENSE @@ -0,0 +1,28 @@ +Copyright 2008, Google 
Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/lib/kokkos/tpls/gtest/gtest/README b/lib/kokkos/tpls/gtest/gtest/README new file mode 100644 index 0000000000..82964ecc32 --- /dev/null +++ b/lib/kokkos/tpls/gtest/gtest/README @@ -0,0 +1,13 @@ +This is a fused source version of gtest 1.7.0. All that should be necessary to +start using gtest in your package is to declare the dependency and include +gtest/gtest.h. 
+ +However, because some of the packages that are developed in Sierra do not use a +fused source version of gtest we need to make it possible for them to build with +this version as well as with their native build. To facilitate this we have +created symlinks for the other gtest headers that they use to the fused source +gtest.h. This will make it possible for them find the headers while still using +the fuse source version. This should not have any ill effects since the header is +protected and allows for only using the non-gtest.h headers in their files. + + diff --git a/lib/kokkos/tpls/gtest/gtest/gtest-all.cc b/lib/kokkos/tpls/gtest/gtest/gtest-all.cc new file mode 100644 index 0000000000..538c78db93 --- /dev/null +++ b/lib/kokkos/tpls/gtest/gtest/gtest-all.cc @@ -0,0 +1,9594 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// Google C++ Testing Framework (Google Test) +// +// Sometimes it's desirable to build Google Test by compiling a single file. +// This file serves this purpose. + +// This line ensures that gtest.h can be compiled on its own, even +// when it's fused. +#include "gtest/gtest.h" + +// The following lines pull in the real gtest *.cc files. +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) + +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Utilities for testing Google Test itself and code that uses Google Test +// (e.g. frameworks built on top of Google Test). + +#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_ +#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + + +namespace testing { + +// This helper class can be used to mock out Google Test failure reporting +// so that we can test Google Test or code that builds on Google Test. +// +// An object of this class appends a TestPartResult object to the +// TestPartResultArray object given in the constructor whenever a Google Test +// failure is reported. It can either intercept only failures that are +// generated in the same thread that created this object or it can intercept +// all generated failures. The scope of this mock object can be controlled with +// the second argument to the two arguments constructor. +class GTEST_API_ ScopedFakeTestPartResultReporter + : public TestPartResultReporterInterface { + public: + // The two possible mocking modes of this object. + enum InterceptMode { + INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. + INTERCEPT_ALL_THREADS // Intercepts all failures. + }; + + // The c'tor sets this object as the test part result reporter used + // by Google Test. The 'result' parameter specifies where to report the + // results. This reporter will only catch failures generated in the current + // thread. 
DEPRECATED + explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); + + // Same as above, but you can choose the interception scope of this object. + ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, + TestPartResultArray* result); + + // The d'tor restores the previous test part result reporter. + virtual ~ScopedFakeTestPartResultReporter(); + + // Appends the TestPartResult object to the TestPartResultArray + // received in the constructor. + // + // This method is from the TestPartResultReporterInterface + // interface. + virtual void ReportTestPartResult(const TestPartResult& result); + private: + void Init(); + + const InterceptMode intercept_mode_; + TestPartResultReporterInterface* old_reporter_; + TestPartResultArray* const result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); +}; + +namespace internal { + +// A helper class for implementing EXPECT_FATAL_FAILURE() and +// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +class GTEST_API_ SingleFailureChecker { + public: + // The constructor remembers the arguments. + SingleFailureChecker(const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr); + ~SingleFailureChecker(); + private: + const TestPartResultArray* const results_; + const TestPartResult::Type type_; + const string substr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker); +}; + +} // namespace internal + +} // namespace testing + +// A set of macros for testing Google Test assertions or code that's expected +// to generate Google Test fatal failures. It verifies that the given +// statement will cause exactly one fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. 
EXPECT_FATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - 'statement' cannot reference local non-static variables or +// non-static members of the current object. +// - 'statement' cannot return a value. +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. The AcceptsMacroThatExpandsToUnprotectedComma test in +// gtest_unittest.cc will fail to compile if we do that. +#define EXPECT_FATAL_FAILURE(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ALL_THREADS, >est_failures);\ + 
GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +// A macro for testing Google Test assertions or code that's expected to +// generate Google Test non-fatal failures. It asserts that the given +// statement will cause exactly one non-fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// 'statement' is allowed to reference local variables and members of +// the current object. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. If we do that, the code won't compile when the user gives +// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that +// expands to code containing an unprotected comma. The +// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc +// catches that. +// +// For the same reason, we have to write +// if (::testing::internal::AlwaysTrue()) { statement; } +// instead of +// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) +// to avoid an MSVC warning on unreachable code. 
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \ + do {\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, >est_failures);\ + if (::testing::internal::AlwaysTrue()) { statement; }\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do {\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + >est_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ + >est_failures);\ + if (::testing::internal::AlwaysTrue()) { statement; }\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include // NOLINT +#include +#include + +#if GTEST_OS_LINUX + +// TODO(kenton@google.com): Use autoconf to detect availability of +// gettimeofday(). +# define GTEST_HAS_GETTIMEOFDAY_ 1 + +# include // NOLINT +# include // NOLINT +# include // NOLINT +// Declares vsnprintf(). This header is not available on Windows. +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include + +#elif GTEST_OS_SYMBIAN +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include // NOLINT + +#elif GTEST_OS_ZOS +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include // NOLINT + +// On z/OS we additionally need strings.h for strcasecmp. +# include // NOLINT + +#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE. 
+ +# include // NOLINT + +#elif GTEST_OS_WINDOWS // We are on Windows proper. + +# include // NOLINT +# include // NOLINT +# include // NOLINT +# include // NOLINT + +# if GTEST_OS_WINDOWS_MINGW +// MinGW has gettimeofday() but not _ftime64(). +// TODO(kenton@google.com): Use autoconf to detect availability of +// gettimeofday(). +// TODO(kenton@google.com): There are other ways to get the time on +// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW +// supports these. consider using them instead. +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include // NOLINT +# endif // GTEST_OS_WINDOWS_MINGW + +// cpplint thinks that the header is already included, so we want to +// silence it. +# include // NOLINT + +#else + +// Assume other platforms have gettimeofday(). +// TODO(kenton@google.com): Use autoconf to detect availability of +// gettimeofday(). +# define GTEST_HAS_GETTIMEOFDAY_ 1 + +// cpplint thinks that the header is already included, so we want to +// silence it. +# include // NOLINT +# include // NOLINT + +#endif // GTEST_OS_LINUX + +#if GTEST_HAS_EXCEPTIONS +# include +#endif + +#if GTEST_CAN_STREAM_RESULTS_ +# include // NOLINT +# include // NOLINT +#endif + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Utility functions and classes used by the Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) +// +// This file contains purely Google Test's internal implementation. Please +// DO NOT #INCLUDE IT IN A USER PROGRAM. + +#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_ +#define GTEST_SRC_GTEST_INTERNAL_INL_H_ + +// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is +// part of Google Test's implementation; otherwise it's undefined. +#if !GTEST_IMPLEMENTATION_ +// A user is trying to include this from his code - just say no. +# error "gtest-internal-inl.h is part of Google Test's internal implementation." +# error "It must not be included except by Google Test itself." 
+#endif // GTEST_IMPLEMENTATION_ + +#ifndef _WIN32_WCE +# include +#endif // !_WIN32_WCE +#include +#include // For strtoll/_strtoul64/malloc/free. +#include // For memmove. + +#include +#include +#include + + +#if GTEST_CAN_STREAM_RESULTS_ +# include // NOLINT +# include // NOLINT +#endif + +#if GTEST_OS_WINDOWS +# include // NOLINT +#endif // GTEST_OS_WINDOWS + + +namespace testing { + +// Declares the flags. +// +// We don't want the users to modify this flag in the code, but want +// Google Test's own unit tests to be able to access it. Therefore we +// declare it here as opposed to in gtest.h. +GTEST_DECLARE_bool_(death_test_use_fork); + +namespace internal { + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest; + +// Names of the flags (needed for parsing Google Test flags). +const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests"; +const char kBreakOnFailureFlag[] = "break_on_failure"; +const char kCatchExceptionsFlag[] = "catch_exceptions"; +const char kColorFlag[] = "color"; +const char kFilterFlag[] = "filter"; +const char kListTestsFlag[] = "list_tests"; +const char kOutputFlag[] = "output"; +const char kPrintTimeFlag[] = "print_time"; +const char kRandomSeedFlag[] = "random_seed"; +const char kRepeatFlag[] = "repeat"; +const char kShuffleFlag[] = "shuffle"; +const char kStackTraceDepthFlag[] = "stack_trace_depth"; +const char kStreamResultToFlag[] = "stream_result_to"; +const char kThrowOnFailureFlag[] = "throw_on_failure"; + +// A valid random seed must be in [1, kMaxRandomSeed]. +const int kMaxRandomSeed = 99999; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. +GTEST_API_ extern bool g_help_flag; + +// Returns the current time in milliseconds. +GTEST_API_ TimeInMillis GetTimeInMillis(); + +// Returns true iff Google Test should use colors in the output. 
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty); + +// Formats the given time in milliseconds as seconds. +GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms); + +// Converts the given time in milliseconds to a date string in the ISO 8601 +// format, without the timezone information. N.B.: due to the use the +// non-reentrant localtime() function, this function is not thread safe. Do +// not use it in any code that can be called from multiple threads. +GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms); + +// Parses a string for an Int32 flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +GTEST_API_ bool ParseInt32Flag( + const char* str, const char* flag, Int32* value); + +// Returns a random seed in range [1, kMaxRandomSeed] based on the +// given --gtest_random_seed flag value. +inline int GetRandomSeedFromFlag(Int32 random_seed_flag) { + const unsigned int raw_seed = (random_seed_flag == 0) ? + static_cast(GetTimeInMillis()) : + static_cast(random_seed_flag); + + // Normalizes the actual seed to range [1, kMaxRandomSeed] such that + // it's easy to type. + const int normalized_seed = + static_cast((raw_seed - 1U) % + static_cast(kMaxRandomSeed)) + 1; + return normalized_seed; +} + +// Returns the first valid random seed after 'seed'. The behavior is +// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is +// considered to be 1. +inline int GetNextRandomSeed(int seed) { + GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed) + << "Invalid random seed " << seed << " - must be in [1, " + << kMaxRandomSeed << "]."; + const int next_seed = seed + 1; + return (next_seed > kMaxRandomSeed) ? 1 : next_seed; +} + +// This class saves the values of all Google Test flags in its c'tor, and +// restores them in its d'tor. +class GTestFlagSaver { + public: + // The c'tor. 
+ GTestFlagSaver() { + also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests); + break_on_failure_ = GTEST_FLAG(break_on_failure); + catch_exceptions_ = GTEST_FLAG(catch_exceptions); + color_ = GTEST_FLAG(color); + death_test_style_ = GTEST_FLAG(death_test_style); + death_test_use_fork_ = GTEST_FLAG(death_test_use_fork); + filter_ = GTEST_FLAG(filter); + internal_run_death_test_ = GTEST_FLAG(internal_run_death_test); + list_tests_ = GTEST_FLAG(list_tests); + output_ = GTEST_FLAG(output); + print_time_ = GTEST_FLAG(print_time); + random_seed_ = GTEST_FLAG(random_seed); + repeat_ = GTEST_FLAG(repeat); + shuffle_ = GTEST_FLAG(shuffle); + stack_trace_depth_ = GTEST_FLAG(stack_trace_depth); + stream_result_to_ = GTEST_FLAG(stream_result_to); + throw_on_failure_ = GTEST_FLAG(throw_on_failure); + } + + // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS. + ~GTestFlagSaver() { + GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_; + GTEST_FLAG(break_on_failure) = break_on_failure_; + GTEST_FLAG(catch_exceptions) = catch_exceptions_; + GTEST_FLAG(color) = color_; + GTEST_FLAG(death_test_style) = death_test_style_; + GTEST_FLAG(death_test_use_fork) = death_test_use_fork_; + GTEST_FLAG(filter) = filter_; + GTEST_FLAG(internal_run_death_test) = internal_run_death_test_; + GTEST_FLAG(list_tests) = list_tests_; + GTEST_FLAG(output) = output_; + GTEST_FLAG(print_time) = print_time_; + GTEST_FLAG(random_seed) = random_seed_; + GTEST_FLAG(repeat) = repeat_; + GTEST_FLAG(shuffle) = shuffle_; + GTEST_FLAG(stack_trace_depth) = stack_trace_depth_; + GTEST_FLAG(stream_result_to) = stream_result_to_; + GTEST_FLAG(throw_on_failure) = throw_on_failure_; + } + + private: + // Fields for saving the original values of flags. 
+ bool also_run_disabled_tests_; + bool break_on_failure_; + bool catch_exceptions_; + std::string color_; + std::string death_test_style_; + bool death_test_use_fork_; + std::string filter_; + std::string internal_run_death_test_; + bool list_tests_; + std::string output_; + bool print_time_; + internal::Int32 random_seed_; + internal::Int32 repeat_; + bool shuffle_; + internal::Int32 stack_trace_depth_; + std::string stream_result_to_; + bool throw_on_failure_; +} GTEST_ATTRIBUTE_UNUSED_; + +// Converts a Unicode code point to a narrow string in UTF-8 encoding. +// code_point parameter is of type UInt32 because wchar_t may not be +// wide enough to contain a code point. +// If the code_point is not a valid Unicode code point +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted +// to "(Invalid Unicode 0xXXXXXXXX)". +GTEST_API_ std::string CodePointToUtf8(UInt32 code_point); + +// Converts a wide string to a narrow string in UTF-8 encoding. +// The wide string is assumed to have the following encoding: +// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS) +// UTF-32 if sizeof(wchar_t) == 4 (on Linux) +// Parameter str points to a null-terminated wide string. +// Parameter num_chars may additionally limit the number +// of wchar_t characters processed. -1 is used when the entire string +// should be processed. +// If the string contains code points that are not valid Unicode code points +// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding +// and contains invalid UTF-16 surrogate pairs, values in those pairs +// will be encoded as individual Unicode characters from Basic Normal Plane. +GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars); + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. 
If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded(); + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (e.g., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. +GTEST_API_ bool ShouldShard(const char* total_shards_str, + const char* shard_index_str, + bool in_subprocess_for_death_test); + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error and +// and aborts. +GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val); + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +GTEST_API_ bool ShouldRunTestOnShard( + int total_shards, int shard_index, int test_id); + +// STL container utilities. + +// Returns the number of elements in the given container that satisfy +// the given predicate. +template +inline int CountIf(const Container& c, Predicate predicate) { + // Implemented as an explicit loop since std::count_if() in libCstd on + // Solaris has a non-standard signature. + int count = 0; + for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { + if (predicate(*it)) + ++count; + } + return count; +} + +// Applies a function/functor to each element in the container. 
+template +void ForEach(const Container& c, Functor functor) { + std::for_each(c.begin(), c.end(), functor); +} + +// Returns the i-th element of the vector, or default_value if i is not +// in range [0, v.size()). +template +inline E GetElementOr(const std::vector& v, int i, E default_value) { + return (i < 0 || i >= static_cast(v.size())) ? default_value : v[i]; +} + +// Performs an in-place shuffle of a range of the vector's elements. +// 'begin' and 'end' are element indices as an STL-style range; +// i.e. [begin, end) are shuffled, where 'end' == size() means to +// shuffle to the end of the vector. +template +void ShuffleRange(internal::Random* random, int begin, int end, + std::vector* v) { + const int size = static_cast(v->size()); + GTEST_CHECK_(0 <= begin && begin <= size) + << "Invalid shuffle range start " << begin << ": must be in range [0, " + << size << "]."; + GTEST_CHECK_(begin <= end && end <= size) + << "Invalid shuffle range finish " << end << ": must be in range [" + << begin << ", " << size << "]."; + + // Fisher-Yates shuffle, from + // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle + for (int range_width = end - begin; range_width >= 2; range_width--) { + const int last_in_range = begin + range_width - 1; + const int selected = begin + random->Generate(range_width); + std::swap((*v)[selected], (*v)[last_in_range]); + } +} + +// Performs an in-place shuffle of the vector's elements. +template +inline void Shuffle(internal::Random* random, std::vector* v) { + ShuffleRange(random, 0, static_cast(v->size()), v); +} + +// A function for deleting an object. Handy for being used as a +// functor. +template +static void Delete(T* x) { + delete x; +} + +// A predicate that checks the key of a TestProperty against a known key. +// +// TestPropertyKeyIs is copyable. +class TestPropertyKeyIs { + public: + // Constructor. + // + // TestPropertyKeyIs has NO default constructor. 
+ explicit TestPropertyKeyIs(const std::string& key) : key_(key) {} + + // Returns true iff the test name of test property matches on key_. + bool operator()(const TestProperty& test_property) const { + return test_property.key() == key_; + } + + private: + std::string key_; +}; + +// Class UnitTestOptions. +// +// This class contains functions for processing options the user +// specifies when running the tests. It has only static members. +// +// In most cases, the user can specify an option using either an +// environment variable or a command line flag. E.g. you can set the +// test filter using either GTEST_FILTER or --gtest_filter. If both +// the variable and the flag are present, the latter overrides the +// former. +class GTEST_API_ UnitTestOptions { + public: + // Functions for processing the gtest_output flag. + + // Returns the output format, or "" for normal printed output. + static std::string GetOutputFormat(); + + // Returns the absolute path of the requested output file, or the + // default (test_detail.xml in the original working directory) if + // none was explicitly specified. + static std::string GetAbsolutePathToOutputFile(); + + // Functions for processing the gtest_filter flag. + + // Returns true iff the wildcard pattern matches the string. The + // first ':' or '\0' character in pattern marks the end of it. + // + // This recursive algorithm isn't very efficient, but is clear and + // works well enough for matching test names, which are short. + static bool PatternMatchesString(const char *pattern, const char *str); + + // Returns true iff the user-specified filter matches the test case + // name and the test name. + static bool FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name); + +#if GTEST_OS_WINDOWS + // Function for supporting the gtest_catch_exception flag. + + // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the + // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. 
+ // This function is useful as an __except condition. + static int GTestShouldProcessSEH(DWORD exception_code); +#endif // GTEST_OS_WINDOWS + + // Returns true if "name" matches the ':' separated list of glob-style + // filters in "filter". + static bool MatchesFilter(const std::string& name, const char* filter); +}; + +// Returns the current application's name, removing directory path if that +// is present. Used by UnitTestOptions::GetOutputFile. +GTEST_API_ FilePath GetCurrentExecutableName(); + +// The role interface for getting the OS stack trace as a string. +class OsStackTraceGetterInterface { + public: + OsStackTraceGetterInterface() {} + virtual ~OsStackTraceGetterInterface() {} + + // Returns the current OS stack trace as an std::string. Parameters: + // + // max_depth - the maximum number of stack frames to be included + // in the trace. + // skip_count - the number of top frames to be skipped; doesn't count + // against max_depth. + virtual string CurrentStackTrace(int max_depth, int skip_count) = 0; + + // UponLeavingGTest() should be called immediately before Google Test calls + // user code. It saves some information about the current stack that + // CurrentStackTrace() will use to find and hide Google Test stack frames. + virtual void UponLeavingGTest() = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); +}; + +// A working implementation of the OsStackTraceGetterInterface interface. +class OsStackTraceGetter : public OsStackTraceGetterInterface { + public: + OsStackTraceGetter() : caller_frame_(NULL) {} + + virtual string CurrentStackTrace(int max_depth, int skip_count) + GTEST_LOCK_EXCLUDED_(mutex_); + + virtual void UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_); + + // This string is inserted in place of stack frames that are part of + // Google Test's implementation. 
+ static const char* const kElidedFramesMarker; + + private: + Mutex mutex_; // protects all internal state + + // We save the stack frame below the frame that calls user code. + // We do this because the address of the frame immediately below + // the user code changes between the call to UponLeavingGTest() + // and any calls to CurrentStackTrace() from within the user code. + void* caller_frame_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); +}; + +// Information about a Google Test trace point. +struct TraceInfo { + const char* file; + int line; + std::string message; +}; + +// This is the default global test part result reporter used in UnitTestImpl. +// This class should only be used by UnitTestImpl. +class DefaultGlobalTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. Reports the test part + // result in the current test. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); +}; + +// This is the default per thread test part result reporter used in +// UnitTestImpl. This class should only be used by UnitTestImpl. +class DefaultPerThreadTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. The implementation just + // delegates to the current global test part result reporter of *unit_test_. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); +}; + +// The private implementation of the UnitTest class. 
We don't protect +// the methods under a mutex, as this class is not accessible by a +// user and the UnitTest class that delegates work to this class does +// proper locking. +class GTEST_API_ UnitTestImpl { + public: + explicit UnitTestImpl(UnitTest* parent); + virtual ~UnitTestImpl(); + + // There are two different ways to register your own TestPartResultReporter. + // You can register your own repoter to listen either only for test results + // from the current thread or for results from all threads. + // By default, each per-thread test result repoter just passes a new + // TestPartResult to the global test result reporter, which registers the + // test part result for the currently running test. + + // Returns the global test part result reporter. + TestPartResultReporterInterface* GetGlobalTestPartResultReporter(); + + // Sets the global test part result reporter. + void SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter); + + // Returns the test part result reporter for the current thread. + TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread(); + + // Sets the test part result reporter for the current thread. + void SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter); + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. 
+ int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const { return start_timestamp_; } + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const { return !Failed(); } + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const { + return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed(); + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[i]; + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i) { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[index]; + } + + // Provides access to the event listener list. + TestEventListeners* listeners() { return &listeners_; } + + // Returns the TestResult for the test that's currently running, or + // the TestResult for the ad hoc test if no test is running. + TestResult* current_test_result(); + + // Returns the TestResult for the ad hoc test. + const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; } + + // Sets the OS stack trace getter. 
+ // + // Does nothing if the input and the current OS stack trace getter + // are the same; otherwise, deletes the old getter and makes the + // input the current getter. + void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter); + + // Returns the current OS stack trace getter if it is not NULL; + // otherwise, creates an OsStackTraceGetter, makes it the current + // getter, and returns it. + OsStackTraceGetterInterface* os_stack_trace_getter(); + + // Returns the current OS stack trace as an std::string. + // + // The maximum number of stack frames to be included is specified by + // the gtest_stack_trace_depth flag. The skip_count parameter + // specifies the number of top frames to be skipped, which doesn't + // count against the number of frames to be included. + // + // For example, if Foo() calls Bar(), which in turn calls + // CurrentOsStackTraceExceptTop(1), Foo() will be included in the + // trace but Bar() and CurrentOsStackTraceExceptTop() won't. + std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_; + + // Finds and returns a TestCase with the given name. If one doesn't + // exist, creates one and returns it. + // + // Arguments: + // + // test_case_name: name of the test case + // type_param: the name of the test's type parameter, or NULL if + // this is not a typed or a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase* GetTestCase(const char* test_case_name, + const char* type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Adds a TestInfo to the unit test. 
+ // + // Arguments: + // + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + // test_info: the TestInfo object + void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + TestInfo* test_info) { + // In order to support thread-safe death tests, we need to + // remember the original working directory when the test program + // was first invoked. We cannot do this in RUN_ALL_TESTS(), as + // the user may have changed the current directory before calling + // RUN_ALL_TESTS(). Therefore we capture the current directory in + // AddTestInfo(), which is called to register a TEST or TEST_F + // before main() is reached. + if (original_working_dir_.IsEmpty()) { + original_working_dir_.Set(FilePath::GetCurrentDir()); + GTEST_CHECK_(!original_working_dir_.IsEmpty()) + << "Failed to get the current working directory."; + } + + GetTestCase(test_info->test_case_name(), + test_info->type_param(), + set_up_tc, + tear_down_tc)->AddTestInfo(test_info); + } + +#if GTEST_HAS_PARAM_TEST + // Returns ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() { + return parameterized_test_registry_; + } +#endif // GTEST_HAS_PARAM_TEST + + // Sets the TestCase object for the test that's currently running. + void set_current_test_case(TestCase* a_current_test_case) { + current_test_case_ = a_current_test_case; + } + + // Sets the TestInfo object for the test that's currently running. If + // current_test_info is NULL, the assertion results will be stored in + // ad_hoc_test_result_. 
+ void set_current_test_info(TestInfo* a_current_test_info) { + current_test_info_ = a_current_test_info; + } + + // Registers all parameterized tests defined using TEST_P and + // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter + // combination. This method can be called more then once; it has guards + // protecting from registering the tests more then once. If + // value-parameterized tests are disabled, RegisterParameterizedTests is + // present but does nothing. + void RegisterParameterizedTests(); + + // Runs all tests in this UnitTest object, prints the result, and + // returns true if all tests are successful. If any exception is + // thrown during a test, this test is considered to be failed, but + // the rest of the tests will still be run. + bool RunAllTests(); + + // Clears the results of all tests, except the ad hoc tests. + void ClearNonAdHocTestResult() { + ForEach(test_cases_, TestCase::ClearTestCaseResult); + } + + // Clears the results of ad-hoc test assertions. + void ClearAdHocTestResult() { + ad_hoc_test_result_.Clear(); + } + + // Adds a TestProperty to the current TestResult object when invoked in a + // context of a test or a test case, or to the global property set. If the + // result already contains a property with the same key, the value will be + // updated. + void RecordProperty(const TestProperty& test_property); + + enum ReactionToSharding { + HONOR_SHARDING_PROTOCOL, + IGNORE_SHARDING_PROTOCOL + }; + + // Matches the full name of each test against the user-specified + // filter to decide whether the test should run, then records the + // result in each TestCase and TestInfo object. + // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests + // based on sharding variables in the environment. + // Returns the number of tests that should run. + int FilterTests(ReactionToSharding shard_tests); + + // Prints the names of the tests matching the user-specified filter flag. 
+ void ListTestsMatchingFilter(); + + const TestCase* current_test_case() const { return current_test_case_; } + TestInfo* current_test_info() { return current_test_info_; } + const TestInfo* current_test_info() const { return current_test_info_; } + + // Returns the vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector& environments() { return environments_; } + + // Getters for the per-thread Google Test trace stack. + std::vector& gtest_trace_stack() { + return *(gtest_trace_stack_.pointer()); + } + const std::vector& gtest_trace_stack() const { + return gtest_trace_stack_.get(); + } + +#if GTEST_HAS_DEATH_TEST + void InitDeathTestSubprocessControlInfo() { + internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); + } + // Returns a pointer to the parsed --gtest_internal_run_death_test + // flag, or NULL if that flag was not specified. + // This information is useful only in a death test child process. + // Must not be called before a call to InitGoogleTest. + const InternalRunDeathTestFlag* internal_run_death_test_flag() const { + return internal_run_death_test_flag_.get(); + } + + // Returns a pointer to the current death test factory. + internal::DeathTestFactory* death_test_factory() { + return death_test_factory_.get(); + } + + void SuppressTestEventsIfInSubprocess(); + + friend class ReplaceDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + + // Initializes the event listener performing XML output as specified by + // UnitTestOptions. Must not be called before InitGoogleTest. + void ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Initializes the event listener for streaming test results to a socket. + // Must not be called before InitGoogleTest. + void ConfigureStreamingOutput(); +#endif + + // Performs initialization dependent upon flag values obtained in + // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to + // ParseGoogleTestFlagsOnly. 
In case a user neglects to call InitGoogleTest + // this function is also called from RunAllTests. Since this function can be + // called more than once, it has to be idempotent. + void PostFlagParsingInit(); + + // Gets the random seed used at the start of the current test iteration. + int random_seed() const { return random_seed_; } + + // Gets the random number generator. + internal::Random* random() { return &random_; } + + // Shuffles all test cases, and the tests within each test case, + // making sure that death tests are still run first. + void ShuffleTests(); + + // Restores the test cases and tests to their order before the first shuffle. + void UnshuffleTests(); + + // Returns the value of GTEST_FLAG(catch_exceptions) at the moment + // UnitTest::Run() starts. + bool catch_exceptions() const { return catch_exceptions_; } + + private: + friend class ::testing::UnitTest; + + // Used by UnitTest::Run() to capture the state of + // GTEST_FLAG(catch_exceptions) at the moment it starts. + void set_catch_exceptions(bool value) { catch_exceptions_ = value; } + + // The UnitTest object that owns this implementation object. + UnitTest* const parent_; + + // The working directory when the first TEST() or TEST_F() was + // executed. + internal::FilePath original_working_dir_; + + // The default test part result reporters. + DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_; + DefaultPerThreadTestPartResultReporter + default_per_thread_test_part_result_reporter_; + + // Points to (but doesn't own) the global test part result reporter. + TestPartResultReporterInterface* global_test_part_result_repoter_; + + // Protects read and write access to global_test_part_result_reporter_. + internal::Mutex global_test_part_result_reporter_mutex_; + + // Points to (but doesn't own) the per-thread test part result reporter. 
+ internal::ThreadLocal + per_thread_test_part_result_reporter_; + + // The vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector environments_; + + // The vector of TestCases in their original order. It owns the + // elements in the vector. + std::vector test_cases_; + + // Provides a level of indirection for the test case list to allow + // easy shuffling and restoring the test case order. The i-th + // element of this vector is the index of the i-th test case in the + // shuffled order. + std::vector test_case_indices_; + +#if GTEST_HAS_PARAM_TEST + // ParameterizedTestRegistry object used to register value-parameterized + // tests. + internal::ParameterizedTestCaseRegistry parameterized_test_registry_; + + // Indicates whether RegisterParameterizedTests() has been called already. + bool parameterized_tests_registered_; +#endif // GTEST_HAS_PARAM_TEST + + // Index of the last death test case registered. Initially -1. + int last_death_test_case_; + + // This points to the TestCase for the currently running test. It + // changes as Google Test goes through one test case after another. + // When no test is running, this is set to NULL and Google Test + // stores assertion results in ad_hoc_test_result_. Initially NULL. + TestCase* current_test_case_; + + // This points to the TestInfo for the currently running test. It + // changes as Google Test goes through one test after another. When + // no test is running, this is set to NULL and Google Test stores + // assertion results in ad_hoc_test_result_. Initially NULL. + TestInfo* current_test_info_; + + // Normally, a user only writes assertions inside a TEST or TEST_F, + // or inside a function called by a TEST or TEST_F. Since Google + // Test keeps track of which test is current running, it can + // associate such an assertion with the test it belongs to. 
+ // + // If an assertion is encountered when no TEST or TEST_F is running, + // Google Test attributes the assertion result to an imaginary "ad hoc" + // test, and records the result in ad_hoc_test_result_. + TestResult ad_hoc_test_result_; + + // The list of event listeners that can be used to track events inside + // Google Test. + TestEventListeners listeners_; + + // The OS stack trace getter. Will be deleted when the UnitTest + // object is destructed. By default, an OsStackTraceGetter is used, + // but the user can set this field to use a custom getter if that is + // desired. + OsStackTraceGetterInterface* os_stack_trace_getter_; + + // True iff PostFlagParsingInit() has been called. + bool post_flag_parse_init_performed_; + + // The random number seed used at the beginning of the test run. + int random_seed_; + + // Our random number generator. + internal::Random random_; + + // The time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp_; + + // How long the test took to run, in milliseconds. + TimeInMillis elapsed_time_; + +#if GTEST_HAS_DEATH_TEST + // The decomposed components of the gtest_internal_run_death_test flag, + // parsed when RUN_ALL_TESTS is called. + internal::scoped_ptr internal_run_death_test_flag_; + internal::scoped_ptr death_test_factory_; +#endif // GTEST_HAS_DEATH_TEST + + // A per-thread stack of traces created by the SCOPED_TRACE() macro. + internal::ThreadLocal > gtest_trace_stack_; + + // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests() + // starts. + bool catch_exceptions_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); +}; // class UnitTestImpl + +// Convenience function for accessing the global UnitTest +// implementation object. +inline UnitTestImpl* GetUnitTestImpl() { + return UnitTest::GetInstance()->impl(); +} + +#if GTEST_USES_SIMPLE_RE + +// Internal helper functions for implementing the simple regular +// expression matcher. 
+GTEST_API_ bool IsInSet(char ch, const char* str); +GTEST_API_ bool IsAsciiDigit(char ch); +GTEST_API_ bool IsAsciiPunct(char ch); +GTEST_API_ bool IsRepeat(char ch); +GTEST_API_ bool IsAsciiWhiteSpace(char ch); +GTEST_API_ bool IsAsciiWordChar(char ch); +GTEST_API_ bool IsValidEscape(char ch); +GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); +GTEST_API_ bool ValidateRegex(const char* regex); +GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); +GTEST_API_ bool MatchRepetitionAndRegexAtHead( + bool escaped, char ch, char repeat, const char* regex, const char* str); +GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); + +#endif // GTEST_USES_SIMPLE_RE + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv); +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv); + +#if GTEST_HAS_DEATH_TEST + +// Returns the message describing the last system error, regardless of the +// platform. +GTEST_API_ std::string GetLastErrnoDescription(); + +# if GTEST_OS_WINDOWS +// Provides leak-safe Windows kernel handle ownership. +class AutoHandle { + public: + AutoHandle() : handle_(INVALID_HANDLE_VALUE) {} + explicit AutoHandle(HANDLE handle) : handle_(handle) {} + + ~AutoHandle() { Reset(); } + + HANDLE Get() const { return handle_; } + void Reset() { Reset(INVALID_HANDLE_VALUE); } + void Reset(HANDLE handle) { + if (handle != handle_) { + if (handle_ != INVALID_HANDLE_VALUE) + ::CloseHandle(handle_); + handle_ = handle; + } + } + + private: + HANDLE handle_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle); +}; +# endif // GTEST_OS_WINDOWS + +// Attempts to parse a string into a positive integer pointed to by the +// number parameter. Returns true if that is possible. +// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use +// it here. 
+template +bool ParseNaturalNumber(const ::std::string& str, Integer* number) { + // Fail fast if the given string does not begin with a digit; + // this bypasses strtoXXX's "optional leading whitespace and plus + // or minus sign" semantics, which are undesirable here. + if (str.empty() || !IsDigit(str[0])) { + return false; + } + errno = 0; + + char* end; + // BiggestConvertible is the largest integer type that system-provided + // string-to-number conversion routines can return. + +# if GTEST_OS_WINDOWS && !defined(__GNUC__) + + // MSVC and C++ Builder define __int64 instead of the standard long long. + typedef unsigned __int64 BiggestConvertible; + const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10); + +# else + + typedef unsigned long long BiggestConvertible; // NOLINT + const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10); + +# endif // GTEST_OS_WINDOWS && !defined(__GNUC__) + + const bool parse_success = *end == '\0' && errno == 0; + + // TODO(vladl@google.com): Convert this to compile time assertion when it is + // available. + GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed)); + + const Integer result = static_cast(parsed); + if (parse_success && static_cast(result) == parsed) { + *number = result; + return true; + } + return false; +} +#endif // GTEST_HAS_DEATH_TEST + +// TestResult contains some private methods that should be hidden from +// Google Test user but are required for testing. This class allow our tests +// to access them. +// +// This class is supplied only for the purpose of testing Google Test's own +// constructs. Do not use it in user tests, either directly or indirectly. 
+class TestResultAccessor { + public: + static void RecordProperty(TestResult* test_result, + const std::string& xml_element, + const TestProperty& property) { + test_result->RecordProperty(xml_element, property); + } + + static void ClearTestPartResults(TestResult* test_result) { + test_result->ClearTestPartResults(); + } + + static const std::vector& test_part_results( + const TestResult& test_result) { + return test_result.test_part_results(); + } +}; + +#if GTEST_CAN_STREAM_RESULTS_ + +// Streams test results to the given port on the given host machine. +class StreamingListener : public EmptyTestEventListener { + public: + // Abstract base class for writing strings to a socket. + class AbstractSocketWriter { + public: + virtual ~AbstractSocketWriter() {} + + // Sends a string to the socket. + virtual void Send(const string& message) = 0; + + // Closes the socket. + virtual void CloseConnection() {} + + // Sends a string and a newline to the socket. + void SendLn(const string& message) { + Send(message + "\n"); + } + }; + + // Concrete class for actually writing strings to a socket. + class SocketWriter : public AbstractSocketWriter { + public: + SocketWriter(const string& host, const string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + } + + virtual ~SocketWriter() { + if (sockfd_ != -1) + CloseConnection(); + } + + // Sends a string to the socket. + virtual void Send(const string& message) { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const int len = static_cast(message.length()); + if (write(sockfd_, message.c_str(), len) != len) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. 
+ void CloseConnection() { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + int sockfd_; // socket file descriptor + const string host_name_; + const string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); + }; // class SocketWriter + + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". + static string UrlEncode(const char* str); + + StreamingListener(const string& host, const string& port) + : socket_writer_(new SocketWriter(host, port)) { Start(); } + + explicit StreamingListener(AbstractSocketWriter* socket_writer) + : socket_writer_(socket_writer) { Start(); } + + void OnTestProgramStart(const UnitTest& /* unit_test */) { + SendLn("event=TestProgramStart"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) { + // Note that Google Test current only report elapsed time for each + // test iteration, not for the entire test program. + SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed())); + + // Notify the streaming server to stop. 
+ socket_writer_->CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) { + SendLn("event=TestIterationStart&iteration=" + + StreamableToString(iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) { + SendLn("event=TestIterationEnd&passed=" + + FormatBool(unit_test.Passed()) + "&elapsed_time=" + + StreamableToString(unit_test.elapsed_time()) + "ms"); + } + + void OnTestCaseStart(const TestCase& test_case) { + SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); + } + + void OnTestCaseEnd(const TestCase& test_case) { + SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + + "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + + "ms"); + } + + void OnTestStart(const TestInfo& test_info) { + SendLn(std::string("event=TestStart&name=") + test_info.name()); + } + + void OnTestEnd(const TestInfo& test_info) { + SendLn("event=TestEnd&passed=" + + FormatBool((test_info.result())->Passed()) + + "&elapsed_time=" + + StreamableToString((test_info.result())->elapsed_time()) + "ms"); + } + + void OnTestPartResult(const TestPartResult& test_part_result) { + const char* file_name = test_part_result.file_name(); + if (file_name == NULL) + file_name = ""; + SendLn("event=TestPartResult&file=" + UrlEncode(file_name) + + "&line=" + StreamableToString(test_part_result.line_number()) + + "&message=" + UrlEncode(test_part_result.message())); + } + + private: + // Sends the given message and a newline to the socket. + void SendLn(const string& message) { socket_writer_->SendLn(message); } + + // Called at the start of streaming to notify the receiver what + // protocol we are using. + void Start() { SendLn("gtest_streaming_protocol_version=1.0"); } + + string FormatBool(bool value) { return value ? 
"1" : "0"; } + + const scoped_ptr socket_writer_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +#endif // GTEST_CAN_STREAM_RESULTS_ + +} // namespace internal +} // namespace testing + +#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_ +#undef GTEST_IMPLEMENTATION_ + +#if GTEST_OS_WINDOWS +# define vsnprintf _vsnprintf +#endif // GTEST_OS_WINDOWS + +namespace testing { + +using internal::CountIf; +using internal::ForEach; +using internal::GetElementOr; +using internal::Shuffle; + +// Constants. + +// A test whose test case name or test name matches this filter is +// disabled and not run. +static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*"; + +// A test case whose name matches this filter is considered a death +// test case and will be run before test cases whose name doesn't +// match this filter. +static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*"; + +// A test filter that matches everything. +static const char kUniversalFilter[] = "*"; + +// The default output file for XML output. +static const char kDefaultOutputFile[] = "test_detail.xml"; + +// The environment variable name for the test shard index. +static const char kTestShardIndex[] = "GTEST_SHARD_INDEX"; +// The environment variable name for the total number of test shards. +static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS"; +// The environment variable name for the test shard status file. +static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE"; + +namespace internal { + +// The text used in failure messages to indicate the start of the +// stack trace. +const char kStackTraceMarker[] = "\nStack trace:\n"; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. 
+bool g_help_flag = false; + +} // namespace internal + +static const char* GetDefaultFilter() { + return kUniversalFilter; +} + +GTEST_DEFINE_bool_( + also_run_disabled_tests, + internal::BoolFromGTestEnv("also_run_disabled_tests", false), + "Run disabled tests too, in addition to the tests normally being run."); + +GTEST_DEFINE_bool_( + break_on_failure, + internal::BoolFromGTestEnv("break_on_failure", false), + "True iff a failed assertion should be a debugger break-point."); + +GTEST_DEFINE_bool_( + catch_exceptions, + internal::BoolFromGTestEnv("catch_exceptions", true), + "True iff " GTEST_NAME_ + " should catch exceptions and treat them as test failures."); + +GTEST_DEFINE_string_( + color, + internal::StringFromGTestEnv("color", "auto"), + "Whether to use colors in the output. Valid values: yes, no, " + "and auto. 'auto' means to use colors if the output is " + "being sent to a terminal and the TERM environment variable " + "is set to a terminal type that supports colors."); + +GTEST_DEFINE_string_( + filter, + internal::StringFromGTestEnv("filter", GetDefaultFilter()), + "A colon-separated list of glob (not regex) patterns " + "for filtering the tests to run, optionally followed by a " + "'-' and a : separated list of negative patterns (tests to " + "exclude). A test is run if it matches one of the positive " + "patterns and does not match any of the negative patterns."); + +GTEST_DEFINE_bool_(list_tests, false, + "List all tests without running them."); + +GTEST_DEFINE_string_( + output, + internal::StringFromGTestEnv("output", ""), + "A format (currently must be \"xml\"), optionally followed " + "by a colon and an output file name or directory. A directory " + "is indicated by a trailing pathname separator. " + "Examples: \"xml:filename.xml\", \"xml::directoryname/\". 
" + "If a directory is specified, output files will be created " + "within that directory, with file-names based on the test " + "executable's name and, if necessary, made unique by adding " + "digits."); + +GTEST_DEFINE_bool_( + print_time, + internal::BoolFromGTestEnv("print_time", true), + "True iff " GTEST_NAME_ + " should display elapsed time in text output."); + +GTEST_DEFINE_int32_( + random_seed, + internal::Int32FromGTestEnv("random_seed", 0), + "Random number seed to use when shuffling test orders. Must be in range " + "[1, 99999], or 0 to use a seed based on the current time."); + +GTEST_DEFINE_int32_( + repeat, + internal::Int32FromGTestEnv("repeat", 1), + "How many times to repeat each test. Specify a negative number " + "for repeating forever. Useful for shaking out flaky tests."); + +GTEST_DEFINE_bool_( + show_internal_stack_frames, false, + "True iff " GTEST_NAME_ " should include internal stack frames when " + "printing test failure stack traces."); + +GTEST_DEFINE_bool_( + shuffle, + internal::BoolFromGTestEnv("shuffle", false), + "True iff " GTEST_NAME_ + " should randomize tests' order on every run."); + +GTEST_DEFINE_int32_( + stack_trace_depth, + internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth), + "The maximum number of stack frames to print when an " + "assertion fails. The valid range is 0 through 100, inclusive."); + +GTEST_DEFINE_string_( + stream_result_to, + internal::StringFromGTestEnv("stream_result_to", ""), + "This flag specifies the host name and the port number on which to stream " + "test results. Example: \"localhost:555\". 
The flag is effective only on " + "Linux."); + +GTEST_DEFINE_bool_( + throw_on_failure, + internal::BoolFromGTestEnv("throw_on_failure", false), + "When this flag is specified, a failed assertion will throw an exception " + "if exceptions are enabled or exit the program with a non-zero code " + "otherwise."); + +namespace internal { + +// Generates a random number from [0, range), using a Linear +// Congruential Generator (LCG). Crashes if 'range' is 0 or greater +// than kMaxRange. +UInt32 Random::Generate(UInt32 range) { + // These constants are the same as are used in glibc's rand(3). + state_ = (1103515245U*state_ + 12345U) % kMaxRange; + + GTEST_CHECK_(range > 0) + << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range <= kMaxRange) + << "Generation of a number in [0, " << range << ") was requested, " + << "but this can only generate numbers in [0, " << kMaxRange << ")."; + + // Converting via modulus introduces a bit of downward bias, but + // it's simple, and a linear congruential generator isn't too good + // to begin with. + return state_ % range; +} + +// GTestIsInitialized() returns true iff the user has initialized +// Google Test. Useful for catching the user mistake of not initializing +// Google Test before calling RUN_ALL_TESTS(). +// +// A user must call testing::InitGoogleTest() to initialize Google +// Test. g_init_gtest_count is set to the number of times +// InitGoogleTest() has been called. We don't protect this variable +// under a mutex as it is only accessed in the main thread. +GTEST_API_ int g_init_gtest_count = 0; +static bool GTestIsInitialized() { return g_init_gtest_count != 0; } + +// Iterates over a vector of TestCases, keeping a running sum of the +// results of calling a given int-returning method on each. +// Returns the sum. 
+static int SumOverTestCaseList(const std::vector& case_list, + int (TestCase::*method)() const) { + int sum = 0; + for (size_t i = 0; i < case_list.size(); i++) { + sum += (case_list[i]->*method)(); + } + return sum; +} + +// Returns true iff the test case passed. +static bool TestCasePassed(const TestCase* test_case) { + return test_case->should_run() && test_case->Passed(); +} + +// Returns true iff the test case failed. +static bool TestCaseFailed(const TestCase* test_case) { + return test_case->should_run() && test_case->Failed(); +} + +// Returns true iff test_case contains at least one test that should +// run. +static bool ShouldRunTestCase(const TestCase* test_case) { + return test_case->should_run(); +} + +// AssertHelper constructor. +AssertHelper::AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message) + : data_(new AssertHelperData(type, file, line, message)) { +} + +AssertHelper::~AssertHelper() { + delete data_; +} + +// Message assignment, for assertion streaming support. +void AssertHelper::operator=(const Message& message) const { + UnitTest::GetInstance()-> + AddTestPartResult(data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl() + ->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT +} + +// Mutex for linked pointers. +GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// Application pathname gotten in InitGoogleTest. +std::string g_executable_path; + +// Returns the current application's name, removing directory path if that +// is present. +FilePath GetCurrentExecutableName() { + FilePath result; + +#if GTEST_OS_WINDOWS + result.Set(FilePath(g_executable_path).RemoveExtension("exe")); +#else + result.Set(FilePath(g_executable_path)); +#endif // GTEST_OS_WINDOWS + + return result.RemoveDirectoryName(); +} + +// Functions for processing the gtest_output flag. 
+ +// Returns the output format, or "" for normal printed output. +std::string UnitTestOptions::GetOutputFormat() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) return std::string(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + return (colon == NULL) ? + std::string(gtest_output_flag) : + std::string(gtest_output_flag, colon - gtest_output_flag); +} + +// Returns the name of the requested output file, or the default if none +// was explicitly specified. +std::string UnitTestOptions::GetAbsolutePathToOutputFile() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) + return ""; + + const char* const colon = strchr(gtest_output_flag, ':'); + if (colon == NULL) + return internal::FilePath::ConcatPaths( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile)).string(); + + internal::FilePath output_name(colon + 1); + if (!output_name.IsAbsolutePath()) + // TODO(wan@google.com): on Windows \some\path is not an absolute + // path (as its meaning depends on the current drive), yet the + // following logic for turning it into an absolute path is wrong. + // Fix it. + output_name = internal::FilePath::ConcatPaths( + internal::FilePath(UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(colon + 1)); + + if (!output_name.IsDirectory()) + return output_name.string(); + + internal::FilePath result(internal::FilePath::GenerateUniqueFileName( + output_name, internal::GetCurrentExecutableName(), + GetOutputFormat().c_str())); + return result.string(); +} + +// Returns true iff the wildcard pattern matches the string. The +// first ':' or '\0' character in pattern marks the end of it. +// +// This recursive algorithm isn't very efficient, but is clear and +// works well enough for matching test names, which are short. 
+bool UnitTestOptions::PatternMatchesString(const char *pattern, + const char *str) { + switch (*pattern) { + case '\0': + case ':': // Either ':' or '\0' marks the end of the pattern. + return *str == '\0'; + case '?': // Matches any single character. + return *str != '\0' && PatternMatchesString(pattern + 1, str + 1); + case '*': // Matches any string (possibly empty) of characters. + return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || + PatternMatchesString(pattern + 1, str); + default: // Non-special character. Matches itself. + return *pattern == *str && + PatternMatchesString(pattern + 1, str + 1); + } +} + +bool UnitTestOptions::MatchesFilter( + const std::string& name, const char* filter) { + const char *cur_pattern = filter; + for (;;) { + if (PatternMatchesString(cur_pattern, name.c_str())) { + return true; + } + + // Finds the next pattern in the filter. + cur_pattern = strchr(cur_pattern, ':'); + + // Returns if no more pattern can be found. + if (cur_pattern == NULL) { + return false; + } + + // Skips the pattern separater (the ':' character). + cur_pattern++; + } +} + +// Returns true iff the user-specified filter matches the test case +// name and the test name. +bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name) { + const std::string& full_name = test_case_name + "." 
+ test_name.c_str(); + + // Split --gtest_filter at '-', if there is one, to separate into + // positive filter and negative filter portions + const char* const p = GTEST_FLAG(filter).c_str(); + const char* const dash = strchr(p, '-'); + std::string positive; + std::string negative; + if (dash == NULL) { + positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter + negative = ""; + } else { + positive = std::string(p, dash); // Everything up to the dash + negative = std::string(dash + 1); // Everything after the dash + if (positive.empty()) { + // Treat '-test1' as the same as '*-test1' + positive = kUniversalFilter; + } + } + + // A filter is a colon-separated list of patterns. It matches a + // test if any pattern in it matches the test. + return (MatchesFilter(full_name, positive.c_str()) && + !MatchesFilter(full_name, negative.c_str())); +} + +#if GTEST_HAS_SEH +// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the +// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. +// This function is useful as an __except condition. +int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { + // Google Test should handle a SEH exception if: + // 1. the user wants it to, AND + // 2. this is not a breakpoint exception, AND + // 3. this is not a C++ exception (VC++ implements them via SEH, + // apparently). + // + // SEH exception code for C++ exceptions. + // (see http://support.microsoft.com/kb/185294 for more information). + const DWORD kCxxExceptionCode = 0xe06d7363; + + bool should_handle = true; + + if (!GTEST_FLAG(catch_exceptions)) + should_handle = false; + else if (exception_code == EXCEPTION_BREAKPOINT) + should_handle = false; + else if (exception_code == kCxxExceptionCode) + should_handle = false; + + return should_handle ? 
EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; +} +#endif // GTEST_HAS_SEH + +} // namespace internal + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. Intercepts only failures from the current thread. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + TestPartResultArray* result) + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), + result_(result) { + Init(); +} + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + InterceptMode intercept_mode, TestPartResultArray* result) + : intercept_mode_(intercept_mode), + result_(result) { + Init(); +} + +void ScopedFakeTestPartResultReporter::Init() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + old_reporter_ = impl->GetGlobalTestPartResultReporter(); + impl->SetGlobalTestPartResultReporter(this); + } else { + old_reporter_ = impl->GetTestPartResultReporterForCurrentThread(); + impl->SetTestPartResultReporterForCurrentThread(this); + } +} + +// The d'tor restores the test part result reporter used by Google Test +// before. +ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + impl->SetGlobalTestPartResultReporter(old_reporter_); + } else { + impl->SetTestPartResultReporterForCurrentThread(old_reporter_); + } +} + +// Increments the test part result count and remembers the result. +// This method is from the TestPartResultReporterInterface interface. 
+void ScopedFakeTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + result_->Append(result); +} + +namespace internal { + +// Returns the type ID of ::testing::Test. We should always call this +// instead of GetTypeId< ::testing::Test>() to get the type ID of +// testing::Test. This is to work around a suspected linker bug when +// using Google Test as a framework on Mac OS X. The bug causes +// GetTypeId< ::testing::Test>() to return different values depending +// on whether the call is from the Google Test framework itself or +// from user test code. GetTestTypeId() is guaranteed to always +// return the same value, as it always calls GetTypeId<>() from the +// gtest.cc, which is within the Google Test framework. +TypeId GetTestTypeId() { + return GetTypeId(); +} + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId(); + +// This predicate-formatter checks that 'results' contains a test part +// failure of the given type and that the failure message contains the +// given substring. +AssertionResult HasOneFailure(const char* /* results_expr */, + const char* /* type_expr */, + const char* /* substr_expr */, + const TestPartResultArray& results, + TestPartResult::Type type, + const string& substr) { + const std::string expected(type == TestPartResult::kFatalFailure ? 
+ "1 fatal failure" : + "1 non-fatal failure"); + Message msg; + if (results.size() != 1) { + msg << "Expected: " << expected << "\n" + << " Actual: " << results.size() << " failures"; + for (int i = 0; i < results.size(); i++) { + msg << "\n" << results.GetTestPartResult(i); + } + return AssertionFailure() << msg; + } + + const TestPartResult& r = results.GetTestPartResult(0); + if (r.type() != type) { + return AssertionFailure() << "Expected: " << expected << "\n" + << " Actual:\n" + << r; + } + + if (strstr(r.message(), substr.c_str()) == NULL) { + return AssertionFailure() << "Expected: " << expected << " containing \"" + << substr << "\"\n" + << " Actual:\n" + << r; + } + + return AssertionSuccess(); +} + +// The constructor of SingleFailureChecker remembers where to look up +// test part results, what type of failure we expect, and what +// substring the failure message should contain. +SingleFailureChecker:: SingleFailureChecker( + const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr) + : results_(results), + type_(type), + substr_(substr) {} + +// The destructor of SingleFailureChecker verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. 
+SingleFailureChecker::~SingleFailureChecker() { + EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_); +} + +DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultGlobalTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->current_test_result()->AddTestPartResult(result); + unit_test_->listeners()->repeater()->OnTestPartResult(result); +} + +DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result); +} + +// Returns the global test part result reporter. +TestPartResultReporterInterface* +UnitTestImpl::GetGlobalTestPartResultReporter() { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + return global_test_part_result_repoter_; +} + +// Sets the global test part result reporter. +void UnitTestImpl::SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter) { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + global_test_part_result_repoter_ = reporter; +} + +// Returns the test part result reporter for the current thread. +TestPartResultReporterInterface* +UnitTestImpl::GetTestPartResultReporterForCurrentThread() { + return per_thread_test_part_result_reporter_.get(); +} + +// Sets the test part result reporter for the current thread. +void UnitTestImpl::SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter) { + per_thread_test_part_result_reporter_.set(reporter); +} + +// Gets the number of successful test cases. +int UnitTestImpl::successful_test_case_count() const { + return CountIf(test_cases_, TestCasePassed); +} + +// Gets the number of failed test cases. 
+int UnitTestImpl::failed_test_case_count() const { + return CountIf(test_cases_, TestCaseFailed); +} + +// Gets the number of all test cases. +int UnitTestImpl::total_test_case_count() const { + return static_cast(test_cases_.size()); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTestImpl::test_case_to_run_count() const { + return CountIf(test_cases_, ShouldRunTestCase); +} + +// Gets the number of successful tests. +int UnitTestImpl::successful_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count); +} + +// Gets the number of failed tests. +int UnitTestImpl::failed_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count); +} + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTestImpl::reportable_disabled_test_count() const { + return SumOverTestCaseList(test_cases_, + &TestCase::reportable_disabled_test_count); +} + +// Gets the number of disabled tests. +int UnitTestImpl::disabled_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTestImpl::reportable_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count); +} + +// Gets the number of all tests. +int UnitTestImpl::total_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::total_test_count); +} + +// Gets the number of tests that should run. +int UnitTestImpl::test_to_run_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count); +} + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. 
The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// CurrentOsStackTraceExceptTop(1), Foo() will be included in the +// trace but Bar() and CurrentOsStackTraceExceptTop() won't. +std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { + (void)skip_count; + return ""; +} + +// Returns the current time in milliseconds. +TimeInMillis GetTimeInMillis() { +#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__) + // Difference between 1970-01-01 and 1601-01-01 in milliseconds. + // http://analogous.blogspot.com/2005/04/epoch.html + const TimeInMillis kJavaEpochToWinFileTimeDelta = + static_cast(116444736UL) * 100000UL; + const DWORD kTenthMicrosInMilliSecond = 10000; + + SYSTEMTIME now_systime; + FILETIME now_filetime; + ULARGE_INTEGER now_int64; + // TODO(kenton@google.com): Shouldn't this just use + // GetSystemTimeAsFileTime()? + GetSystemTime(&now_systime); + if (SystemTimeToFileTime(&now_systime, &now_filetime)) { + now_int64.LowPart = now_filetime.dwLowDateTime; + now_int64.HighPart = now_filetime.dwHighDateTime; + now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) - + kJavaEpochToWinFileTimeDelta; + return now_int64.QuadPart; + } + return 0; +#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_ + __timeb64 now; + +# ifdef _MSC_VER + + // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996 + // (deprecated function) there. + // TODO(kenton@google.com): Use GetTickCount()? Or use + // SystemTimeToFileTime() +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996. + _ftime64(&now); +# pragma warning(pop) // Restores the warning state. 
+# else + + _ftime64(&now); + +# endif // _MSC_VER + + return static_cast(now.time) * 1000 + now.millitm; +#elif GTEST_HAS_GETTIMEOFDAY_ + struct timeval now; + gettimeofday(&now, NULL); + return static_cast(now.tv_sec) * 1000 + now.tv_usec / 1000; +#else +# error "Don't know how to get the current time on your system." +#endif +} + +// Utilities + +// class String. + +#if GTEST_OS_WINDOWS_MOBILE +// Creates a UTF-16 wide string from the given ANSI string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the wide string, or NULL if the +// input is NULL. +LPCWSTR String::AnsiToUtf16(const char* ansi) { + if (!ansi) return NULL; + const int length = strlen(ansi); + const int unicode_length = + MultiByteToWideChar(CP_ACP, 0, ansi, length, + NULL, 0); + WCHAR* unicode = new WCHAR[unicode_length + 1]; + MultiByteToWideChar(CP_ACP, 0, ansi, length, + unicode, unicode_length); + unicode[unicode_length] = 0; + return unicode; +} + +// Creates an ANSI string from the given wide string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the ANSI string, or NULL if the +// input is NULL. +const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { + if (!utf16_str) return NULL; + const int ansi_length = + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + NULL, 0, NULL, NULL); + char* ansi = new char[ansi_length + 1]; + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + ansi, ansi_length, NULL, NULL); + ansi[ansi_length] = 0; + return ansi; +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +// Compares two C strings. Returns true iff they have the same content. +// +// Unlike strcmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. 
+bool String::CStringEquals(const char * lhs, const char * rhs) { + if ( lhs == NULL ) return rhs == NULL; + + if ( rhs == NULL ) return false; + + return strcmp(lhs, rhs) == 0; +} + +#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +// Converts an array of wide chars to a narrow string using the UTF-8 +// encoding, and streams the result to the given Message object. +static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, + Message* msg) { + for (size_t i = 0; i != length; ) { // NOLINT + if (wstr[i] != L'\0') { + *msg << WideStringToUtf8(wstr + i, static_cast(length - i)); + while (i != length && wstr[i] != L'\0') + i++; + } else { + *msg << '\0'; + i++; + } + } +} + +#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +} // namespace internal + +// Constructs an empty Message. +// We allocate the stringstream separately because otherwise each use of +// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's +// stack frame leading to huge stack frames in some cases; gcc does not reuse +// the stack space. +Message::Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits::digits10 + 2); +} + +// These two overloads allow streaming a wide C string to a Message +// using the UTF-8 encoding. +Message& Message::operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} +Message& Message::operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} + +#if GTEST_HAS_STD_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. 
+Message& Message::operator <<(const ::std::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Gets the text streamed to this object so far as an std::string. +// Each '\0' character in the buffer is replaced with "\\0". +std::string Message::GetString() const { + return internal::StringStreamToString(ss_.get()); +} + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != NULL ? + new ::std::string(*other.message_) : + static_cast< ::std::string*>(NULL)) { +} + +// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != NULL) + negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { + return AssertionResult(true); +} + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { + return AssertionResult(false); +} + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message. +AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +namespace internal { + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. 
+// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case) { + Message msg; + msg << "Value of: " << actual_expression; + if (actual_value != actual_expression) { + msg << "\n Actual: " << actual_value; + } + + msg << "\nExpected: " << expected_expression; + if (ignoring_case) { + msg << " (ignoring case)"; + } + if (expected_value != expected_expression) { + msg << "\nWhich is: " << expected_value; + } + + return AssertionFailure() << msg; +} + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { + const char* actual_message = assertion_result.message(); + Message msg; + msg << "Value of: " << expression_text + << "\n Actual: " << actual_predicate_value; + if (actual_message[0] != '\0') + msg << " (" << actual_message << ")"; + msg << "\nExpected: " << expected_predicate_value; + return msg.GetString(); +} + +// Helper function for implementing ASSERT_NEAR. 
+AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error) { + const double diff = fabs(val1 - val2); + if (diff <= abs_error) return AssertionSuccess(); + + // TODO(wan): do not print the value of an expression if it's + // already a literal. + return AssertionFailure() + << "The difference between " << expr1 << " and " << expr2 + << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n" + << expr1 << " evaluates to " << val1 << ",\n" + << expr2 << " evaluates to " << val2 << ", and\n" + << abs_error_expr << " evaluates to " << abs_error << "."; +} + + +// Helper template for implementing FloatLE() and DoubleLE(). +template +AssertionResult FloatingPointLE(const char* expr1, + const char* expr2, + RawType val1, + RawType val2) { + // Returns success if val1 is less than val2, + if (val1 < val2) { + return AssertionSuccess(); + } + + // or if val1 is almost equal to val2. + const FloatingPoint lhs(val1), rhs(val2); + if (lhs.AlmostEquals(rhs)) { + return AssertionSuccess(); + } + + // Note that the above two checks will both fail if either val1 or + // val2 is NaN, as the IEEE floating-point standard requires that + // any predicate involving a NaN must return false. + + ::std::stringstream val1_ss; + val1_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << val1; + + ::std::stringstream val2_ss; + val2_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << val2; + + return AssertionFailure() + << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n" + << " Actual: " << StringStreamToString(&val1_ss) << " vs " + << StringStreamToString(&val2_ss); +} + +} // namespace internal + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. 
+AssertionResult FloatLE(const char* expr1, const char* expr2, + float val1, float val2) { + return internal::FloatingPointLE(expr1, expr2, val1, val2); +} + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +AssertionResult DoubleLE(const char* expr1, const char* expr2, + double val1, double val2) { + return internal::FloatingPointLE(expr1, expr2, val1, val2); +} + +namespace internal { + +// The helper function for {ASSERT|EXPECT}_EQ with int or enum +// arguments. +AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + BiggestInt expected, + BiggestInt actual) { + if (expected == actual) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + FormatForComparisonFailureMessage(expected, actual), + FormatForComparisonFailureMessage(actual, expected), + false); +} + +// A macro for implementing the helper functions needed to implement +// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here +// just to avoid copy-and-paste of similar code. +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + BiggestInt val1, BiggestInt val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +} + +// Implements the helper function for {ASSERT|EXPECT}_NE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(NE, !=) +// Implements the helper function for {ASSERT|EXPECT}_LE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LE, <=) +// Implements the helper function for {ASSERT|EXPECT}_LT with int or +// enum arguments. 
+GTEST_IMPL_CMP_HELPER_(LT, < ) +// Implements the helper function for {ASSERT|EXPECT}_GE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GE, >=) +// Implements the helper function for {ASSERT|EXPECT}_GT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GT, > ) + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + false); +} + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CaseInsensitiveCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + true); +} + +// The helper function for {ASSERT|EXPECT}_STRNE. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +// The helper function for {ASSERT|EXPECT}_STRCASENE. 
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CaseInsensitiveCStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" + << s2_expression << ") (ignoring case), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +} // namespace internal + +namespace { + +// Helper functions for implementing IsSubString() and IsNotSubstring(). + +// This group of overloaded functions return true iff needle is a +// substring of haystack. NULL is considered a substring of itself +// only. + +bool IsSubstringPred(const char* needle, const char* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return strstr(haystack, needle) != NULL; +} + +bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return wcsstr(haystack, needle) != NULL; +} + +// StringType here can be either ::std::string or ::std::wstring. +template +bool IsSubstringPred(const StringType& needle, + const StringType& haystack) { + return haystack.find(needle) != StringType::npos; +} + +// This function implements either IsSubstring() or IsNotSubstring(), +// depending on the value of the expected_to_be_substring parameter. +// StringType here can be const char*, const wchar_t*, ::std::string, +// or ::std::wstring. +template +AssertionResult IsSubstringImpl( + bool expected_to_be_substring, + const char* needle_expr, const char* haystack_expr, + const StringType& needle, const StringType& haystack) { + if (IsSubstringPred(needle, haystack) == expected_to_be_substring) + return AssertionSuccess(); + + const bool is_wide_string = sizeof(needle[0]) > 1; + const char* const begin_string_quote = is_wide_string ? 
"L\"" : "\""; + return AssertionFailure() + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? "" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; +} + +} // namespace + +// IsSubstring() and IsNotSubstring() check whether needle is a +// substring of haystack (NULL is considered a substring of itself +// only), and return an appropriate error message when they fail. + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +#if GTEST_HAS_STD_WSTRING +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const 
::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +#if GTEST_OS_WINDOWS + +namespace { + +// Helper function for IsHRESULT{SuccessFailure} predicates +AssertionResult HRESULTFailureHelper(const char* expr, + const char* expected, + long hr) { // NOLINT +# if GTEST_OS_WINDOWS_MOBILE + + // Windows CE doesn't support FormatMessage. + const char error_text[] = ""; + +# else + + // Looks up the human-readable system message for the HRESULT code + // and since we're not passing any params to FormatMessage, we don't + // want inserts expanded. + const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS; + const DWORD kBufSize = 4096; + // Gets the system's human readable message string for this HRESULT. 
+ char error_text[kBufSize] = { '\0' }; + DWORD message_length = ::FormatMessageA(kFlags, + 0, // no source, we're asking system + hr, // the error + 0, // no line width restrictions + error_text, // output buffer + kBufSize, // buf size + NULL); // no arguments for inserts + // Trims tailing white space (FormatMessage leaves a trailing CR-LF) + for (; message_length && IsSpace(error_text[message_length - 1]); + --message_length) { + error_text[message_length - 1] = '\0'; + } + +# endif // GTEST_OS_WINDOWS_MOBILE + + const std::string error_hex("0x" + String::FormatHexInt(hr)); + return ::testing::AssertionFailure() + << "Expected: " << expr << " " << expected << ".\n" + << " Actual: " << error_hex << " " << error_text << "\n"; +} + +} // namespace + +AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT + if (SUCCEEDED(hr)) { + return AssertionSuccess(); + } + return HRESULTFailureHelper(expr, "succeeds", hr); +} + +AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT + if (FAILED(hr)) { + return AssertionSuccess(); + } + return HRESULTFailureHelper(expr, "fails", hr); +} + +#endif // GTEST_OS_WINDOWS + +// Utility functions for encoding Unicode text (wide strings) in +// UTF-8. + +// A Unicode code-point can have upto 21 bits, and is encoded in UTF-8 +// like this: +// +// Code-point length Encoding +// 0 - 7 bits 0xxxxxxx +// 8 - 11 bits 110xxxxx 10xxxxxx +// 12 - 16 bits 1110xxxx 10xxxxxx 10xxxxxx +// 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + +// The maximum code-point a one-byte UTF-8 sequence can represent. +const UInt32 kMaxCodePoint1 = (static_cast(1) << 7) - 1; + +// The maximum code-point a two-byte UTF-8 sequence can represent. +const UInt32 kMaxCodePoint2 = (static_cast(1) << (5 + 6)) - 1; + +// The maximum code-point a three-byte UTF-8 sequence can represent. +const UInt32 kMaxCodePoint3 = (static_cast(1) << (4 + 2*6)) - 1; + +// The maximum code-point a four-byte UTF-8 sequence can represent. 
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern. Returns the n
+// lowest bits. As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+  *bits >>= n;
+  return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+std::string CodePointToUtf8(UInt32 code_point) {
+  if (code_point > kMaxCodePoint4) {
+    return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")";
+  }
+
+  char str[5];  // Big enough for the largest valid code point.
+  if (code_point <= kMaxCodePoint1) {
+    str[1] = '\0';
+    str[0] = static_cast<char>(code_point);  // 0xxxxxxx
+  } else if (code_point <= kMaxCodePoint2) {
+    str[2] = '\0';
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xC0 | code_point);  // 110xxxxx
+  } else if (code_point <= kMaxCodePoint3) {
+    str[3] = '\0';
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xE0 | code_point);  // 1110xxxx
+  } else {  // code_point <= kMaxCodePoint4
+    str[4] = '\0';
+    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xF0 | code_point);  // 11110xxx
+  }
+  return str;
+}
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding.
All supported systems
+// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+  return sizeof(wchar_t) == 2 &&
+      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+                                                    wchar_t second) {
+  const UInt32 mask = (1 << 10) - 1;
+  return (sizeof(wchar_t) == 2) ?
+      (((first & mask) << 10) | (second & mask)) + 0x10000 :
+      // This function should not be called when the condition is
+      // false, but we provide a sensible default in case it is.
+      static_cast<UInt32>(first);
+}
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from Basic Multilingual Plane.
+std::string WideStringToUtf8(const wchar_t* str, int num_chars) { + if (num_chars == -1) + num_chars = static_cast(wcslen(str)); + + ::std::stringstream stream; + for (int i = 0; i < num_chars; ++i) { + UInt32 unicode_code_point; + + if (str[i] == L'\0') { + break; + } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) { + unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i], + str[i + 1]); + i++; + } else { + unicode_code_point = static_cast(str[i]); + } + + stream << CodePointToUtf8(unicode_code_point); + } + return StringStreamToString(&stream); +} + +// Converts a wide C string to an std::string using the UTF-8 encoding. +// NULL will be converted to "(null)". +std::string String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == NULL) return "(null)"; + + return internal::WideStringToUtf8(wide_c_str, -1); +} + +// Compares two wide C strings. Returns true iff they have the same +// content. +// +// Unlike wcscmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + + return wcscmp(lhs, rhs) == 0; +} + +// Helper function for *_STREQ on wide strings. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual) { + if (String::WideCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + false); +} + +// Helper function for *_STRNE on wide strings. 
+AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2) { + if (!String::WideCStringEquals(s1, s2)) { + return AssertionSuccess(); + } + + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: " + << PrintToString(s1) + << " vs " << PrintToString(s2); +} + +// Compares two C strings, ignoring case. Returns true iff they have +// the same content. +// +// Unlike strcasecmp(), this function can handle NULL argument(s). A +// NULL C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { + if (lhs == NULL) + return rhs == NULL; + if (rhs == NULL) + return false; + return posix::StrCaseCmp(lhs, rhs) == 0; +} + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. +bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + +#if GTEST_OS_WINDOWS + return _wcsicmp(lhs, rhs) == 0; +#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID + return wcscasecmp(lhs, rhs) == 0; +#else + // Android, Mac OS X and Cygwin don't define wcscasecmp. + // Other unknown OSes may not define it either. 
+ wint_t left, right; + do { + left = towlower(*lhs++); + right = towlower(*rhs++); + } while (left && left == right); + return left == right; +#endif // OS selector +} + +// Returns true iff str ends with the given suffix, ignoring case. +// Any string is considered to end with an empty suffix. +bool String::EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix) { + const size_t str_len = str.length(); + const size_t suffix_len = suffix.length(); + return (str_len >= suffix_len) && + CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len, + suffix.c_str()); +} + +// Formats an int value as "%02d". +std::string String::FormatIntWidth2(int value) { + std::stringstream ss; + ss << std::setfill('0') << std::setw(2) << value; + return ss.str(); +} + +// Formats an int value as "%X". +std::string String::FormatHexInt(int value) { + std::stringstream ss; + ss << std::hex << std::uppercase << value; + return ss.str(); +} + +// Formats a byte as "%02X". +std::string String::FormatByte(unsigned char value) { + std::stringstream ss; + ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase + << static_cast(value); + return ss.str(); +} + +// Converts the buffer in a stringstream to an std::string, converting NUL +// bytes to "\\0" along the way. +std::string StringStreamToString(::std::stringstream* ss) { + const ::std::string& str = ss->str(); + const char* const start = str.c_str(); + const char* const end = start + str.length(); + + std::string result; + result.reserve(2 * (end - start)); + for (const char* ch = start; ch != end; ++ch) { + if (*ch == '\0') { + result += "\\0"; // Replaces NUL with "\\0"; + } else { + result += *ch; + } + } + + return result; +} + +// Appends the user-supplied message to the Google-Test-generated message. +std::string AppendUserMessage(const std::string& gtest_msg, + const Message& user_msg) { + // Appends the user message if it's non-empty. 
+ const std::string user_msg_string = user_msg.GetString(); + if (user_msg_string.empty()) { + return gtest_msg; + } + + return gtest_msg + "\n" + user_msg_string; +} + +} // namespace internal + +// class TestResult + +// Creates an empty TestResult. +TestResult::TestResult() + : death_test_count_(0), + elapsed_time_(0) { +} + +// D'tor. +TestResult::~TestResult() { +} + +// Returns the i-th test part result among all the results. i can +// range from 0 to total_part_count() - 1. If i is not in that range, +// aborts the program. +const TestPartResult& TestResult::GetTestPartResult(int i) const { + if (i < 0 || i >= total_part_count()) + internal::posix::Abort(); + return test_part_results_.at(i); +} + +// Returns the i-th test property. i can range from 0 to +// test_property_count() - 1. If i is not in that range, aborts the +// program. +const TestProperty& TestResult::GetTestProperty(int i) const { + if (i < 0 || i >= test_property_count()) + internal::posix::Abort(); + return test_properties_.at(i); +} + +// Clears the test part results. +void TestResult::ClearTestPartResults() { + test_part_results_.clear(); +} + +// Adds a test part result to the list. +void TestResult::AddTestPartResult(const TestPartResult& test_part_result) { + test_part_results_.push_back(test_part_result); +} + +// Adds a test property to the list. If a property with the same key as the +// supplied property is already represented, the value of this test_property +// replaces the old value for that key. 
+void TestResult::RecordProperty(const std::string& xml_element,
+                                const TestProperty& test_property) {
+  if (!ValidateTestProperty(xml_element, test_property)) {
+    return;
+  }
+  internal::MutexLock lock(&test_properites_mutex_);
+  const std::vector<TestProperty>::iterator property_with_matching_key =
+      std::find_if(test_properties_.begin(), test_properties_.end(),
+                   internal::TestPropertyKeyIs(test_property.key()));
+  if (property_with_matching_key == test_properties_.end()) {
+    test_properties_.push_back(test_property);
+    return;
+  }
+  property_with_matching_key->SetValue(test_property.value());
+}
+
+// The list of reserved attributes used in the <testsuites> element of XML
+// output.
+static const char* const kReservedTestSuitesAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "random_seed",
+  "tests",
+  "time",
+  "timestamp"
+};
+
+// The list of reserved attributes used in the <testsuite> element of XML
+// output.
+static const char* const kReservedTestSuiteAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "tests",
+  "time"
+};
+
+// The list of reserved attributes used in the <testcase> element of XML output.
+static const char* const kReservedTestCaseAttributes[] = {
+  "classname",
+  "name",
+  "status",
+  "time",
+  "type_param",
+  "value_param"
+};
+
+template <int kSize>
+std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
+  return std::vector<std::string>(array, array + kSize);
+}
+
+static std::vector<std::string> GetReservedAttributesForElement(
+    const std::string& xml_element) {
+  if (xml_element == "testsuites") {
+    return ArrayAsVector(kReservedTestSuitesAttributes);
+  } else if (xml_element == "testsuite") {
+    return ArrayAsVector(kReservedTestSuiteAttributes);
+  } else if (xml_element == "testcase") {
+    return ArrayAsVector(kReservedTestCaseAttributes);
+  } else {
+    GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+  }
+  // This code is unreachable but some compilers may not realize that.
+ return std::vector(); +} + +static std::string FormatWordList(const std::vector& words) { + Message word_list; + for (size_t i = 0; i < words.size(); ++i) { + if (i > 0 && words.size() > 2) { + word_list << ", "; + } + if (i == words.size() - 1) { + word_list << "and "; + } + word_list << "'" << words[i] << "'"; + } + return word_list.GetString(); +} + +bool ValidateTestPropertyName(const std::string& property_name, + const std::vector& reserved_names) { + if (std::find(reserved_names.begin(), reserved_names.end(), property_name) != + reserved_names.end()) { + ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name + << " (" << FormatWordList(reserved_names) + << " are reserved by " << GTEST_NAME_ << ")"; + return false; + } + return true; +} + +// Adds a failure if the key is a reserved attribute of the element named +// xml_element. Returns true if the property is valid. +bool TestResult::ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property) { + return ValidateTestPropertyName(test_property.key(), + GetReservedAttributesForElement(xml_element)); +} + +// Clears the object. +void TestResult::Clear() { + test_part_results_.clear(); + test_properties_.clear(); + death_test_count_ = 0; + elapsed_time_ = 0; +} + +// Returns true iff the test failed. +bool TestResult::Failed() const { + for (int i = 0; i < total_part_count(); ++i) { + if (GetTestPartResult(i).failed()) + return true; + } + return false; +} + +// Returns true iff the test part fatally failed. +static bool TestPartFatallyFailed(const TestPartResult& result) { + return result.fatally_failed(); +} + +// Returns true iff the test fatally failed. +bool TestResult::HasFatalFailure() const { + return CountIf(test_part_results_, TestPartFatallyFailed) > 0; +} + +// Returns true iff the test part non-fatally failed. 
+static bool TestPartNonfatallyFailed(const TestPartResult& result) { + return result.nonfatally_failed(); +} + +// Returns true iff the test has a non-fatal failure. +bool TestResult::HasNonfatalFailure() const { + return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0; +} + +// Gets the number of all test parts. This is the sum of the number +// of successful test parts and the number of failed test parts. +int TestResult::total_part_count() const { + return static_cast(test_part_results_.size()); +} + +// Returns the number of the test properties. +int TestResult::test_property_count() const { + return static_cast(test_properties_.size()); +} + +// class Test + +// Creates a Test object. + +// The c'tor saves the values of all Google Test flags. +Test::Test() + : gtest_flag_saver_(new internal::GTestFlagSaver) { +} + +// The d'tor restores the values of all Google Test flags. +Test::~Test() { + delete gtest_flag_saver_; +} + +// Sets up the test fixture. +// +// A sub-class may override this. +void Test::SetUp() { +} + +// Tears down the test fixture. +// +// A sub-class may override this. +void Test::TearDown() { +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const std::string& key, const std::string& value) { + UnitTest::GetInstance()->RecordProperty(key, value); +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const std::string& key, int value) { + Message value_message; + value_message << value; + RecordProperty(key, value_message.GetString().c_str()); +} + +namespace internal { + +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const std::string& message) { + // This function is a friend of UnitTest and as such has access to + // AddTestPartResult. + UnitTest::GetInstance()->AddTestPartResult( + result_type, + NULL, // No info about the source file where the exception occurred. 
+ -1, // We have no info on which line caused the exception. + message, + ""); // No stack trace, either. +} + +} // namespace internal + +// Google Test requires all tests in the same test case to use the same test +// fixture class. This function checks if the current test has the +// same fixture class as the first test in the current test case. If +// yes, it returns true; otherwise it generates a Google Test failure and +// returns false. +bool Test::HasSameFixtureClass() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + const TestCase* const test_case = impl->current_test_case(); + + // Info about the first test in the current test case. + const TestInfo* const first_test_info = test_case->test_info_list()[0]; + const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_; + const char* const first_test_name = first_test_info->name(); + + // Info about the current test. + const TestInfo* const this_test_info = impl->current_test_info(); + const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_; + const char* const this_test_name = this_test_info->name(); + + if (this_fixture_id != first_fixture_id) { + // Is the first test defined using TEST? + const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId(); + // Is this test defined using TEST? + const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId(); + + if (first_is_TEST || this_is_TEST) { + // The user mixed TEST and TEST_F in this test case - we'll tell + // him/her how to fix it. + + // Gets the name of the TEST and the name of the TEST_F. Note + // that first_is_TEST and this_is_TEST cannot both be true, as + // the fixture IDs are different for the two tests. + const char* const TEST_name = + first_is_TEST ? first_test_name : this_test_name; + const char* const TEST_F_name = + first_is_TEST ? 
this_test_name : first_test_name; + + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class, so mixing TEST_F and TEST in the same test case is\n" + << "illegal. In test case " << this_test_info->test_case_name() + << ",\n" + << "test " << TEST_F_name << " is defined using TEST_F but\n" + << "test " << TEST_name << " is defined using TEST. You probably\n" + << "want to change the TEST to TEST_F or move it to another test\n" + << "case."; + } else { + // The user defined two fixture classes with the same name in + // two namespaces - we'll tell him/her how to fix it. + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " + << this_test_info->test_case_name() << ",\n" + << "you defined test " << first_test_name + << " and test " << this_test_name << "\n" + << "using two different test fixture classes. This can happen if\n" + << "the two classes are from different namespaces or translation\n" + << "units and have the same name. You should probably rename one\n" + << "of the classes to put the tests into different test cases."; + } + return false; + } + + return true; +} + +#if GTEST_HAS_SEH + +// Adds an "exception thrown" fatal failure to the current test. This +// function returns its result via an output parameter pointer because VC++ +// prohibits creation of objects with destructors on stack in functions +// using __try (see error C2712). +static std::string* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { + Message message; + message << "SEH exception with code 0x" << std::setbase(16) << + exception_code << std::setbase(10) << " thrown in " << location << "."; + + return new std::string(message.GetString()); +} + +#endif // GTEST_HAS_SEH + +namespace internal { + +#if GTEST_HAS_EXCEPTIONS + +// Adds an "exception thrown" fatal failure to the current test. 
+static std::string FormatCxxExceptionMessage(const char* description, + const char* location) { + Message message; + if (description != NULL) { + message << "C++ exception with description \"" << description << "\""; + } else { + message << "Unknown C++ exception"; + } + message << " thrown in " << location << "."; + + return message.GetString(); +} + +static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result); + +GoogleTestFailureException::GoogleTestFailureException( + const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} + +#endif // GTEST_HAS_EXCEPTIONS + +// We put these helper functions in the internal namespace as IBM's xlC +// compiler rejects the code if they were declared static. + +// Runs the given method and handles SEH exceptions it throws, when +// SEH is supported; returns the 0-value for type Result in case of an +// SEH exception. (Microsoft compilers cannot handle SEH and C++ +// exceptions in the same function. Therefore, we provide a separate +// wrapper function for handling SEH exceptions.) +template +Result HandleSehExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { +#if GTEST_HAS_SEH + __try { + return (object->*method)(); + } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT + GetExceptionCode())) { + // We create the exception message on the heap because VC++ prohibits + // creation of objects with destructors on stack in functions using __try + // (see error C2712). 
+ std::string* exception_message = FormatSehExceptionMessage( + GetExceptionCode(), location); + internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure, + *exception_message); + delete exception_message; + return static_cast(0); + } +#else + (void)location; + return (object->*method)(); +#endif // GTEST_HAS_SEH +} + +// Runs the given method and catches and reports C++ and/or SEH-style +// exceptions, if they are supported; returns the 0-value for type +// Result in case of an SEH exception. +template +Result HandleExceptionsInMethodIfSupported( + T* object, Result (T::*method)(), const char* location) { + // NOTE: The user code can affect the way in which Google Test handles + // exceptions by setting GTEST_FLAG(catch_exceptions), but only before + // RUN_ALL_TESTS() starts. It is technically possible to check the flag + // after the exception is caught and either report or re-throw the + // exception based on the flag's value: + // + // try { + // // Perform the test method. + // } catch (...) { + // if (GTEST_FLAG(catch_exceptions)) + // // Report the exception as failure. + // else + // throw; // Re-throws the original exception. + // } + // + // However, the purpose of this flag is to allow the program to drop into + // the debugger when the exception is thrown. On most platforms, once the + // control enters the catch block, the exception origin information is + // lost and the debugger will stop the program at the point of the + // re-throw in this function -- instead of at the point of the original + // throw statement in the code under test. For this reason, we perform + // the check early, sacrificing the ability to affect Google Test's + // exception handling in the method where the exception is thrown. 
+ if (internal::GetUnitTestImpl()->catch_exceptions()) { +#if GTEST_HAS_EXCEPTIONS + try { + return HandleSehExceptionsInMethodIfSupported(object, method, location); + } catch (const internal::GoogleTestFailureException&) { // NOLINT + // This exception type can only be thrown by a failed Google + // Test assertion with the intention of letting another testing + // framework catch it. Therefore we just re-throw it. + throw; + } catch (const std::exception& e) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(e.what(), location)); + } catch (...) { // NOLINT + internal::ReportFailureInUnknownLocation( + TestPartResult::kFatalFailure, + FormatCxxExceptionMessage(NULL, location)); + } + return static_cast(0); +#else + return HandleSehExceptionsInMethodIfSupported(object, method, location); +#endif // GTEST_HAS_EXCEPTIONS + } else { + return (object->*method)(); + } +} + +} // namespace internal + +// Runs the test and updates the test result. +void Test::Run() { + if (!HasSameFixtureClass()) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()"); + // We will run the test only if SetUp() was successful. + if (!HasFatalFailure()) { + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TestBody, "the test body"); + } + + // However, we want to clean up as much as possible. Hence we will + // always call TearDown(), even if SetUp() or the test body has + // failed. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TearDown, "TearDown()"); +} + +// Returns true iff the current test has a fatal failure. 
+bool Test::HasFatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure(); +} + +// Returns true iff the current test has a non-fatal failure. +bool Test::HasNonfatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()-> + HasNonfatalFailure(); +} + +// class TestInfo + +// Constructs a TestInfo object. It assumes ownership of the test factory +// object. +TestInfo::TestInfo(const std::string& a_test_case_name, + const std::string& a_name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory) + : test_case_name_(a_test_case_name), + name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : NULL), + value_param_(a_value_param ? new std::string(a_value_param) : NULL), + fixture_class_id_(fixture_class_id), + should_run_(false), + is_disabled_(false), + matches_filter_(false), + factory_(factory), + result_() {} + +// Destructs a TestInfo object. +TestInfo::~TestInfo() { delete factory_; } + +namespace internal { + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param: the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param: text representation of the test's value parameter, +// or NULL if this is not a value-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. 
+TestInfo* MakeAndRegisterTestInfo(
+    const char* test_case_name,
+    const char* name,
+    const char* type_param,
+    const char* value_param,
+    TypeId fixture_class_id,
+    SetUpTestCaseFunc set_up_tc,
+    TearDownTestCaseFunc tear_down_tc,
+    TestFactoryBase* factory) {
+  TestInfo* const test_info =
+      new TestInfo(test_case_name, name, type_param, value_param,
+                   fixture_class_id, factory);
+  GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
+  return test_info;
+}
+
+#if GTEST_HAS_PARAM_TEST
+void ReportInvalidTestCaseType(const char* test_case_name,
+                               const char* file, int line) {
+  Message errors;
+  errors
+      << "Attempted redefinition of test case " << test_case_name << ".\n"
+      << "All tests in the same test case must use the same test fixture\n"
+      << "class. However, in test case " << test_case_name << ", you tried\n"
+      << "to define a test using a fixture class different from the one\n"
+      << "used earlier. This can happen if the two fixture classes are\n"
+      << "from different namespaces and have the same name. You should\n"
+      << "probably rename one of the classes to put the tests into different\n"
+      << "test cases.";
+
+  fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+          errors.GetString().c_str());
+}
+#endif  // GTEST_HAS_PARAM_TEST
+
+} // namespace internal
+
+namespace {
+
+// A predicate that checks the test name of a TestInfo against a known
+// value.
+//
+// This is used for implementation of the TestCase class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestNameIs is copyable.
+
+// Commenting out this class since it's not used and therefore produces warnings
+// class TestNameIs {
+//  public:
+//   // Constructor.
+//   //
+//   // TestNameIs has NO default constructor.
+//   explicit TestNameIs(const char* name)
+//       : name_(name) {}
+//
+//   // Returns true iff the test name of test_info matches name_.
+// bool operator()(const TestInfo * test_info) const { +// return test_info && test_info->name() == name_; +// } +// +// private: +// std::string name_; +//}; + +} // namespace + +namespace internal { + +// This method expands all parameterized tests registered with macros TEST_P +// and INSTANTIATE_TEST_CASE_P into regular tests and registers those. +// This will be done just once during the program runtime. +void UnitTestImpl::RegisterParameterizedTests() { +#if GTEST_HAS_PARAM_TEST + if (!parameterized_tests_registered_) { + parameterized_test_registry_.RegisterTests(); + parameterized_tests_registered_ = true; + } +#endif +} + +} // namespace internal + +// Creates the test object, runs it, records its result, and then +// deletes it. +void TestInfo::Run() { + if (!should_run_) return; + + // Tells UnitTest where to store test result. + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_info(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Notifies the unit test event listeners that a test is about to start. + repeater->OnTestStart(*this); + + const TimeInMillis start = internal::GetTimeInMillis(); + + impl->os_stack_trace_getter()->UponLeavingGTest(); + + // Creates the test object. + Test* const test = internal::HandleExceptionsInMethodIfSupported( + factory_, &internal::TestFactoryBase::CreateTest, + "the test fixture's constructor"); + + // Runs the test only if the test object was created and its + // constructor didn't generate a fatal failure. + if ((test != NULL) && !Test::HasFatalFailure()) { + // This doesn't throw as all user code that can throw are wrapped into + // exception handling code. + test->Run(); + } + + // Deletes the test object. 
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      test, &Test::DeleteSelf_, "the test fixture's destructor");
+
+  result_.set_elapsed_time(internal::GetTimeInMillis() - start);
+
+  // Notifies the unit test event listener that a test has just finished.
+  repeater->OnTestEnd(*this);
+
+  // Tells UnitTest to stop associating assertion results to this
+  // test.
+  impl->set_current_test_info(NULL);
+}
+
+// class TestCase
+
+// Gets the number of successful tests in this test case.
+int TestCase::successful_test_count() const {
+  return CountIf(test_info_list_, TestPassed);
+}
+
+// Gets the number of failed tests in this test case.
+int TestCase::failed_test_count() const {
+  return CountIf(test_info_list_, TestFailed);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int TestCase::reportable_disabled_test_count() const {
+  return CountIf(test_info_list_, TestReportableDisabled);
+}
+
+// Gets the number of disabled tests in this test case.
+int TestCase::disabled_test_count() const {
+  return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int TestCase::reportable_test_count() const {
+  return CountIf(test_info_list_, TestReportable);
+}
+
+// Get the number of tests in this test case that should run.
+int TestCase::test_to_run_count() const {
+  return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestCase::total_test_count() const {
+  return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestCase with the given name.
+//
+// Arguments:
+//
+//   name:         name of the test case
+//   a_type_param: the name of the test case's type parameter, or NULL if
+//                 this is not a typed or a type-parameterized test case.
+//   set_up_tc:    pointer to the function that sets up the test case
+//   tear_down_tc: pointer to the function that tears down the test case
+TestCase::TestCase(const char* a_name, const char* a_type_param,
+                   Test::SetUpTestCaseFunc set_up_tc,
+                   Test::TearDownTestCaseFunc tear_down_tc)
+    : name_(a_name),
+      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+      set_up_tc_(set_up_tc),
+      tear_down_tc_(tear_down_tc),
+      should_run_(false),
+      elapsed_time_(0) {
+}
+
+// Destructor of TestCase.
+TestCase::~TestCase() {
+  // Deletes every Test in the collection.
+  ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+  const int index = GetElementOr(test_indices_, i, -1);
+  return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+  const int index = GetElementOr(test_indices_, i, -1);
+  return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Adds a test to this test case. Will delete the test upon
+// destruction of the TestCase object.
+void TestCase::AddTestInfo(TestInfo * test_info) {
+  test_info_list_.push_back(test_info);
+  test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestCase.
+void TestCase::Run() {
+  if (!should_run_) return;
+
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  impl->set_current_test_case(this);
+
+  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+  repeater->OnTestCaseStart(*this);
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");
+
+  const internal::TimeInMillis start = internal::GetTimeInMillis();
+  for (int i = 0; i < total_test_count(); i++) {
+    GetMutableTestInfo(i)->Run();
+  }
+  elapsed_time_ = internal::GetTimeInMillis() - start;
+
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");
+
+  repeater->OnTestCaseEnd(*this);
+  impl->set_current_test_case(NULL);
+}
+
+// Clears the results of all tests in this test case.
+void TestCase::ClearResult() {
+  ad_hoc_test_result_.Clear();
+  ForEach(test_info_list_, TestInfo::ClearTestResult);
+}
+
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+  Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+  for (size_t i = 0; i < test_indices_.size(); i++) {
+    test_indices_[i] = static_cast<int>(i);
+  }
+}
+
+// Formats a countable noun.  Depending on its quantity, either the
+// singular form or the plural form is used. e.g.
+//
+// FormatCountableNoun(1, "formula", "formuli") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static std::string FormatCountableNoun(int count,
+                                       const char * singular_form,
+                                       const char * plural_form) {
+  return internal::StreamableToString(count) + " " +
+      (count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static std::string FormatTestCount(int test_count) { + return FormatCountableNoun(test_count, "test", "tests"); +} + +// Formats the count of test cases. +static std::string FormatTestCaseCount(int test_case_count) { + return FormatCountableNoun(test_case_count, "test case", "test cases"); +} + +// Converts a TestPartResult::Type enum to human-friendly string +// representation. Both kNonFatalFailure and kFatalFailure are translated +// to "Failure", as the user usually doesn't care about the difference +// between the two when viewing the test result. +static const char * TestPartResultTypeToString(TestPartResult::Type type) { + switch (type) { + case TestPartResult::kSuccess: + return "Success"; + + case TestPartResult::kNonFatalFailure: + case TestPartResult::kFatalFailure: +#ifdef _MSC_VER + return "error: "; +#else + return "Failure\n"; +#endif + default: + return "Unknown result type"; + } +} + +namespace internal { + +// Prints a TestPartResult to an std::string. +static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result) { + return (Message() + << internal::FormatFileLocation(test_part_result.file_name(), + test_part_result.line_number()) + << " " << TestPartResultTypeToString(test_part_result.type()) + << test_part_result.message()).GetString(); +} + +// Prints a TestPartResult. +static void PrintTestPartResult(const TestPartResult& test_part_result) { + const std::string& result = + PrintTestPartResultToString(test_part_result); + printf("%s\n", result.c_str()); + fflush(stdout); + // If the test program runs in Visual Studio or a debugger, the + // following statements add the test part result message to the Output + // window such that the user can double-click on it to jump to the + // corresponding source code location; otherwise they do nothing. 
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + // We don't call OutputDebugString*() on Windows Mobile, as printing + // to stdout is done by OutputDebugString() there already - we don't + // want the same message printed twice. + ::OutputDebugStringA(result.c_str()); + ::OutputDebugStringA("\n"); +#endif +} + +// class PrettyUnitTestResultPrinter + +enum GTestColor { + COLOR_DEFAULT, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW +}; + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns the character attribute for the given color. +WORD GetColorAttribute(GTestColor color) { + switch (color) { + case COLOR_RED: return FOREGROUND_RED; + case COLOR_GREEN: return FOREGROUND_GREEN; + case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN; + default: return 0; + } +} + +#else + +// Returns the ANSI color code for the given color. COLOR_DEFAULT is +// an invalid input. +const char* GetAnsiColorCode(GTestColor color) { + switch (color) { + case COLOR_RED: return "1"; + case COLOR_GREEN: return "2"; + case COLOR_YELLOW: return "3"; + default: return NULL; + }; +} + +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns true iff Google Test should use colors in the output. +bool ShouldUseColor(bool stdout_is_tty) { + const char* const gtest_color = GTEST_FLAG(color).c_str(); + + if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) { +#if GTEST_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return stdout_is_tty; +#else + // On non-Windows platforms, we rely on the TERM variable. 
+ const char* const term = posix::GetEnv("TERM"); + const bool term_supports_color = + String::CStringEquals(term, "xterm") || + String::CStringEquals(term, "xterm-color") || + String::CStringEquals(term, "xterm-256color") || + String::CStringEquals(term, "screen") || + String::CStringEquals(term, "screen-256color") || + String::CStringEquals(term, "linux") || + String::CStringEquals(term, "cygwin"); + return stdout_is_tty && term_supports_color; +#endif // GTEST_OS_WINDOWS + } + + return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); + // We take "yes", "true", "t", and "1" as meaning "yes". If the + // value is neither one of these nor "auto", we treat it as "no" to + // be conservative. +} + +// Helpers for printing colored strings to stdout. Note that on Windows, we +// cannot simply emit special characters and have the terminal change colors. +// This routine must actually emit the characters rather than return a string +// that would be colored when printed, as can be done on Linux. +void ColoredPrintf(GTestColor color, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || GTEST_OS_IOS + const bool use_color = false; +#else + static const bool in_color_mode = + ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); + const bool use_color = in_color_mode && (color != COLOR_DEFAULT); +#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + // The '!= 0' comparison is necessary to satisfy MSVC 7.1. + + if (!use_color) { + vprintf(fmt, args); + va_end(args); + return; + } + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. 
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, + GetColorAttribute(color) | FOREGROUND_INTENSITY); + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + printf("\033[0;3%sm", GetAnsiColorCode(color)); + vprintf(fmt, args); + printf("\033[m"); // Resets the terminal to default. +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + va_end(args); +} + +// Text printed in Google Test's text output and --gunit_list_tests +// output to label the type parameter and value parameter for a test. +static const char kTypeParamLabel[] = "TypeParam"; +static const char kValueParamLabel[] = "GetParam()"; + +void PrintFullTestCommentIfPresent(const TestInfo& test_info) { + const char* const type_param = test_info.type_param(); + const char* const value_param = test_info.value_param(); + + if (type_param != NULL || value_param != NULL) { + printf(", where "); + if (type_param != NULL) { + printf("%s = %s", kTypeParamLabel, type_param); + if (value_param != NULL) + printf(" and "); + } + if (value_param != NULL) { + printf("%s = %s", kValueParamLabel, value_param); + } + } +} + +// This class implements the TestEventListener interface. +// +// Class PrettyUnitTestResultPrinter is copyable. +class PrettyUnitTestResultPrinter : public TestEventListener { + public: + PrettyUnitTestResultPrinter() {} + static void PrintTestName(const char * test_case, const char * test) { + printf("%s.%s", test_case, test); + } + + // The following methods override what's in the TestEventListener class. 
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} + + private: + static void PrintFailedTests(const UnitTest& unit_test); +}; + + // Fired before each iteration of tests starts. +void PrettyUnitTestResultPrinter::OnTestIterationStart( + const UnitTest& unit_test, int iteration) { + if (GTEST_FLAG(repeat) != 1) + printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1); + + const char* const filter = GTEST_FLAG(filter).c_str(); + + // Prints the filter if it's not *. This reminds the user that some + // tests may be skipped. 
+  if (!String::CStringEquals(filter, kUniversalFilter)) {
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: %s filter = %s\n", GTEST_NAME_, filter);
+  }
+
+  if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+    const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: This is test shard %d of %s.\n",
+                  static_cast<int>(shard_index) + 1,
+                  internal::posix::GetEnv(kTestTotalShards));
+  }
+
+  if (GTEST_FLAG(shuffle)) {
+    ColoredPrintf(COLOR_YELLOW,
+                  "Note: Randomizing tests' orders with a seed of %d .\n",
+                  unit_test.random_seed());
+  }
+
+  ColoredPrintf(COLOR_GREEN,  "[==========] ");
+  printf("Running %s from %s.\n",
+         FormatTestCount(unit_test.test_to_run_count()).c_str(),
+         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+    const UnitTest& /*unit_test*/) {
+  ColoredPrintf(COLOR_GREEN,  "[----------] ");
+  printf("Global test environment set-up.\n");
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+  const std::string counts =
+      FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+  ColoredPrintf(COLOR_GREEN, "[----------] ");
+  printf("%s from %s", counts.c_str(), test_case.name());
+  if (test_case.type_param() == NULL) {
+    printf("\n");
+  } else {
+    printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param());
+  }
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
+  ColoredPrintf(COLOR_GREEN,  "[ RUN      ] ");
+  PrintTestName(test_info.test_case_name(), test_info.name());
+  printf("\n");
+  fflush(stdout);
+}
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+    const TestPartResult& result) {
+  // If the test part succeeded, we don't need to do anything.
+ if (result.type() == TestPartResult::kSuccess) + return; + + // Print failure message from the assertion (e.g. expected this and got that). + PrintTestPartResult(result); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { + if (test_info.result()->Passed()) { + ColoredPrintf(COLOR_GREEN, "[ OK ] "); + } else { + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + } + PrintTestName(test_info.test_case_name(), test_info.name()); + if (test_info.result()->Failed()) + PrintFullTestCommentIfPresent(test_info); + + if (GTEST_FLAG(print_time)) { + printf(" (%s ms)\n", internal::StreamableToString( + test_info.result()->elapsed_time()).c_str()); + } else { + printf("\n"); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) { + if (!GTEST_FLAG(print_time)) return; + + const std::string counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s (%s ms total)\n\n", + counts.c_str(), test_case.name(), + internal::StreamableToString(test_case.elapsed_time()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment tear-down\n"); + fflush(stdout); +} + +// Internal helper for printing the list of failed tests. 
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) { + const int failed_test_count = unit_test.failed_test_count(); + if (failed_test_count == 0) { + return; + } + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + const TestCase& test_case = *unit_test.GetTestCase(i); + if (!test_case.should_run() || (test_case.failed_test_count() == 0)) { + continue; + } + for (int j = 0; j < test_case.total_test_count(); ++j) { + const TestInfo& test_info = *test_case.GetTestInfo(j); + if (!test_info.should_run() || test_info.result()->Passed()) { + continue; + } + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s.%s", test_case.name(), test_info.name()); + PrintFullTestCommentIfPresent(test_info); + printf("\n"); + } + } +} + +void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("%s from %s ran.", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + if (GTEST_FLAG(print_time)) { + printf(" (%s ms total)", + internal::StreamableToString(unit_test.elapsed_time()).c_str()); + } + printf("\n"); + ColoredPrintf(COLOR_GREEN, "[ PASSED ] "); + printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str()); + + int num_failures = unit_test.failed_test_count(); + if (!unit_test.Passed()) { + const int failed_test_count = unit_test.failed_test_count(); + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str()); + PrintFailedTests(unit_test); + printf("\n%2d FAILED %s\n", num_failures, + num_failures == 1 ? "TEST" : "TESTS"); + } + + int num_disabled = unit_test.reportable_disabled_test_count(); + if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) { + if (!num_failures) { + printf("\n"); // Add a spacer if no FAILURE banner is displayed. 
+ } + ColoredPrintf(COLOR_YELLOW, + " YOU HAVE %d DISABLED %s\n\n", + num_disabled, + num_disabled == 1 ? "TEST" : "TESTS"); + } + // Ensure that Google Test output is printed before, e.g., heapchecker output. + fflush(stdout); +} + +// End PrettyUnitTestResultPrinter + +// class TestEventRepeater +// +// This class forwards events to other event listeners. +class TestEventRepeater : public TestEventListener { + public: + TestEventRepeater() : forwarding_enabled_(true) {} + virtual ~TestEventRepeater(); + void Append(TestEventListener *listener); + TestEventListener* Release(TestEventListener* listener); + + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled() const { return forwarding_enabled_; } + void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; } + + virtual void OnTestProgramStart(const UnitTest& unit_test); + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test); + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test); + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& unit_test); + + private: + // Controls whether events will be forwarded to listeners_. Set to false + // in death test child processes. + bool forwarding_enabled_; + // The list of listeners that receive events. 
+  std::vector<TestEventListener*> listeners_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+  ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+  listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+  for (size_t i = 0; i < listeners_.size(); ++i) {
+    if (listeners_[i] == listener) {
+      listeners_.erase(listeners_.begin() + i);
+      return listener;
+    }
+  }
+
+  return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (size_t i = 0; i < listeners_.size(); i++) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+                                             int iteration) {
+  if (forwarding_enabled_) {
+    for (size_t i = 0; i < listeners_.size(); i++) {
+      listeners_[i]->OnTestIterationStart(unit_test, iteration);
+    }
+  }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+                                           int iteration) {
+  if (forwarding_enabled_) {
+    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+      listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+    }
+  }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+  explicit XmlUnitTestResultPrinter(const char* output_file);
+
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+
+ private:
+  // Is c a whitespace character that is normalized to a space character
+  // when it appears in an XML attribute value?
+ static bool IsNormalizableWhitespace(char c) { + return c == 0x9 || c == 0xA || c == 0xD; + } + + // May c appear in a well-formed XML document? + static bool IsValidXmlCharacter(char c) { + return IsNormalizableWhitespace(c) || c >= 0x20; + } + + // Returns an XML-escaped copy of the input string str. If + // is_attribute is true, the text is meant to appear as an attribute + // value, and normalizable whitespace is preserved by replacing it + // with character references. + static std::string EscapeXml(const std::string& str, bool is_attribute); + + // Returns the given string with all characters invalid in XML removed. + static std::string RemoveInvalidXmlCharacters(const std::string& str); + + // Convenience wrapper around EscapeXml when str is an attribute value. + static std::string EscapeXmlAttribute(const std::string& str) { + return EscapeXml(str, true); + } + + // Convenience wrapper around EscapeXml when str is not an attribute value. + static std::string EscapeXmlText(const char* str) { + return EscapeXml(str, false); + } + + // Verifies that the given attribute belongs to the given element and + // streams the attribute as XML. + static void OutputXmlAttribute(std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value); + + // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. + static void OutputXmlCDataSection(::std::ostream* stream, const char* data); + + // Streams an XML representation of a TestInfo object. + static void OutputXmlTestInfo(::std::ostream* stream, + const char* test_case_name, + const TestInfo& test_info); + + // Prints an XML representation of a TestCase object + static void PrintXmlTestCase(::std::ostream* stream, + const TestCase& test_case); + + // Prints an XML summary of unit_test to output stream out. 
+ static void PrintXmlUnitTest(::std::ostream* stream, + const UnitTest& unit_test); + + // Produces a string representing the test properties in a result as space + // delimited XML attributes based on the property key="value" pairs. + // When the std::string is not empty, it includes a space at the beginning, + // to delimit this attribute from prior attributes. + static std::string TestPropertiesAsXmlAttributes(const TestResult& result); + + // The output file. + const std::string output_file_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); +}; + +// Creates a new XmlUnitTestResultPrinter. +XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file) + : output_file_(output_file) { + if (output_file_.c_str() == NULL || output_file_.empty()) { + fprintf(stderr, "XML output file may not be null\n"); + fflush(stderr); + exit(EXIT_FAILURE); + } +} + +// Called after the unit test ends. +void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + FILE* xmlout = NULL; + FilePath output_file(output_file_); + FilePath output_dir(output_file.RemoveFileName()); + + if (output_dir.CreateDirectoriesRecursively()) { + xmlout = posix::FOpen(output_file_.c_str(), "w"); + } + if (xmlout == NULL) { + // TODO(wan): report the reason of the failure. + // + // We don't do it for now as: + // + // 1. There is no urgent need for it. + // 2. It's a bit involved to make the errno variable thread-safe on + // all three operating systems (Linux, Windows, and Mac OS). + // 3. To interpret the meaning of errno in a thread-safe way, + // we need the strerror_r() function, which is not available on + // Windows. 
+    fprintf(stderr,
+            "Unable to open file \"%s\"\n",
+            output_file_.c_str());
+    fflush(stderr);
+    exit(EXIT_FAILURE);
+  }
+  std::stringstream stream;
+  PrintXmlUnitTest(&stream, unit_test);
+  fprintf(xmlout, "%s", StringStreamToString(&stream).c_str());
+  fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str.  If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
+// TODO(wan): It might be nice to have a minimally invasive, human-readable
+// escaping scheme for invalid characters, rather than dropping them.
+std::string XmlUnitTestResultPrinter::EscapeXml(
+    const std::string& str, bool is_attribute) {
+  Message m;
+
+  for (size_t i = 0; i < str.size(); ++i) {
+    const char ch = str[i];
+    switch (ch) {
+      case '<':
+        m << "&lt;";
+        break;
+      case '>':
+        m << "&gt;";
+        break;
+      case '&':
+        m << "&amp;";
+        break;
+      case '\'':
+        if (is_attribute)
+          m << "&apos;";
+        else
+          m << '\'';
+        break;
+      case '"':
+        if (is_attribute)
+          m << "&quot;";
+        else
+          m << '"';
+        break;
+      default:
+        if (IsValidXmlCharacter(ch)) {
+          if (is_attribute && IsNormalizableWhitespace(ch))
+            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+              << ";";
+          else
+            m << ch;
+        }
+        break;
+    }
+  }
+
+  return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+    const std::string& str) {
+  std::string output;
+  output.reserve(str.size());
+  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << ms/1000.0;
+  return ss.str();
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+  // Using non-reentrant version as localtime_r is not portable.
+  time_t seconds = static_cast<time_t>(ms / 1000);
+#ifdef _MSC_VER
+# pragma warning(push)          // Saves the current warning state.
+# pragma warning(disable:4996)  // Temporarily disables warning 4996
+                                // (function or variable may be unsafe).
+  const struct tm* const time_struct = localtime(&seconds);  // NOLINT
+# pragma warning(pop)           // Restores the warning state again.
+#else + const struct tm* const time_struct = localtime(&seconds); // NOLINT +#endif + if (time_struct == NULL) + return ""; // Invalid ms value + + // YYYY-MM-DDThh:mm:ss + return StreamableToString(time_struct->tm_year + 1900) + "-" + + String::FormatIntWidth2(time_struct->tm_mon + 1) + "-" + + String::FormatIntWidth2(time_struct->tm_mday) + "T" + + String::FormatIntWidth2(time_struct->tm_hour) + ":" + + String::FormatIntWidth2(time_struct->tm_min) + ":" + + String::FormatIntWidth2(time_struct->tm_sec); +} + +// Streams an XML CDATA section, escaping invalid CDATA sequences as needed. +void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream, + const char* data) { + const char* segment = data; + *stream << ""); + if (next_segment != NULL) { + stream->write( + segment, static_cast(next_segment - segment)); + *stream << "]]>]]>"); + } else { + *stream << segment; + break; + } + } + *stream << "]]>"; +} + +void XmlUnitTestResultPrinter::OutputXmlAttribute( + std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value) { + const std::vector& allowed_names = + GetReservedAttributesForElement(element_name); + + GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != + allowed_names.end()) + << "Attribute " << name << " is not allowed for element <" << element_name + << ">."; + + *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\""; +} + +// Prints an XML representation of a TestInfo object. +// TODO(wan): There is also value in printing properties with the plain printer. 
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream, + const char* test_case_name, + const TestInfo& test_info) { + const TestResult& result = *test_info.result(); + const std::string kTestcase = "testcase"; + + *stream << " \n"; + } + const string location = internal::FormatCompilerIndependentFileLocation( + part.file_name(), part.line_number()); + const string summary = location + "\n" + part.summary(); + *stream << " "; + const string detail = location + "\n" + part.message(); + OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str()); + *stream << "\n"; + } + } + + if (failures == 0) + *stream << " />\n"; + else + *stream << " \n"; +} + +// Prints an XML representation of a TestCase object +void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream, + const TestCase& test_case) { + const std::string kTestsuite = "testsuite"; + *stream << " <" << kTestsuite; + OutputXmlAttribute(stream, kTestsuite, "name", test_case.name()); + OutputXmlAttribute(stream, kTestsuite, "tests", + StreamableToString(test_case.reportable_test_count())); + OutputXmlAttribute(stream, kTestsuite, "failures", + StreamableToString(test_case.failed_test_count())); + OutputXmlAttribute( + stream, kTestsuite, "disabled", + StreamableToString(test_case.reportable_disabled_test_count())); + OutputXmlAttribute(stream, kTestsuite, "errors", "0"); + OutputXmlAttribute(stream, kTestsuite, "time", + FormatTimeInMillisAsSeconds(test_case.elapsed_time())); + *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result()) + << ">\n"; + + for (int i = 0; i < test_case.total_test_count(); ++i) { + if (test_case.GetTestInfo(i)->is_reportable()) + OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i)); + } + *stream << " \n"; +} + +// Prints an XML summary of unit_test to output stream out. 
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream, + const UnitTest& unit_test) { + const std::string kTestsuites = "testsuites"; + + *stream << "\n"; + *stream << "<" << kTestsuites; + + OutputXmlAttribute(stream, kTestsuites, "tests", + StreamableToString(unit_test.reportable_test_count())); + OutputXmlAttribute(stream, kTestsuites, "failures", + StreamableToString(unit_test.failed_test_count())); + OutputXmlAttribute( + stream, kTestsuites, "disabled", + StreamableToString(unit_test.reportable_disabled_test_count())); + OutputXmlAttribute(stream, kTestsuites, "errors", "0"); + OutputXmlAttribute( + stream, kTestsuites, "timestamp", + FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp())); + OutputXmlAttribute(stream, kTestsuites, "time", + FormatTimeInMillisAsSeconds(unit_test.elapsed_time())); + + if (GTEST_FLAG(shuffle)) { + OutputXmlAttribute(stream, kTestsuites, "random_seed", + StreamableToString(unit_test.random_seed())); + } + + *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result()); + + OutputXmlAttribute(stream, kTestsuites, "name", "AllTests"); + *stream << ">\n"; + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + if (unit_test.GetTestCase(i)->reportable_test_count() > 0) + PrintXmlTestCase(stream, *unit_test.GetTestCase(i)); + } + *stream << "\n"; +} + +// Produces a string representing the test properties in a result as space +// delimited XML attributes based on the property key="value" pairs. 
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes( + const TestResult& result) { + Message attributes; + for (int i = 0; i < result.test_property_count(); ++i) { + const TestProperty& property = result.GetTestProperty(i); + attributes << " " << property.key() << "=" + << "\"" << EscapeXmlAttribute(property.value()) << "\""; + } + return attributes.GetString(); +} + +// End XmlUnitTestResultPrinter + +#if GTEST_CAN_STREAM_RESULTS_ + +// Checks if str contains '=', '&', '%' or '\n' characters. If yes, +// replaces them by "%xx" where xx is their hexadecimal value. For +// example, replaces "=" with "%3D". This algorithm is O(strlen(str)) +// in both time and space -- important as the input str may contain an +// arbitrarily long test failure message and stack trace. +string StreamingListener::UrlEncode(const char* str) { + string result; + result.reserve(strlen(str) + 1); + for (char ch = *str; ch != '\0'; ch = *++str) { + switch (ch) { + case '%': + case '=': + case '&': + case '\n': + result.append("%" + String::FormatByte(static_cast(ch))); + break; + default: + result.push_back(ch); + break; + } + } + return result; +} + +void StreamingListener::SocketWriter::MakeConnection() { + GTEST_CHECK_(sockfd_ == -1) + << "MakeConnection() can't be called when there is already a connection."; + + addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses. + hints.ai_socktype = SOCK_STREAM; + addrinfo* servinfo = NULL; + + // Use the getaddrinfo() to get a linked list of IP addresses for + // the given host name. + const int error_num = getaddrinfo( + host_name_.c_str(), port_num_.c_str(), &hints, &servinfo); + if (error_num != 0) { + GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: " + << gai_strerror(error_num); + } + + // Loop through all the results and connect to the first we can. 
+ for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL; + cur_addr = cur_addr->ai_next) { + sockfd_ = socket( + cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol); + if (sockfd_ != -1) { + // Connect the client socket to the server socket. + if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) { + close(sockfd_); + sockfd_ = -1; + } + } + } + + freeaddrinfo(servinfo); // all done with this structure + + if (sockfd_ == -1) { + GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to " + << host_name_ << ":" << port_num_; + } +} + +// End of class Streaming Listener +#endif // GTEST_CAN_STREAM_RESULTS__ + +// Class ScopedTrace + +// Pushes the given source file location and message onto a per-thread +// trace stack maintained by Google Test. +ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { + TraceInfo trace; + trace.file = file; + trace.line = line; + trace.message = message.GetString(); + + UnitTest::GetInstance()->PushGTestTrace(trace); +} + +// Pops the info pushed by the c'tor. +ScopedTrace::~ScopedTrace() + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { + UnitTest::GetInstance()->PopGTestTrace(); +} + + +// class OsStackTraceGetter + +// Returns the current OS stack trace as an std::string. Parameters: +// +// max_depth - the maximum number of stack frames to be included +// in the trace. +// skip_count - the number of top frames to be skipped; doesn't count +// against max_depth. +// +string OsStackTraceGetter::CurrentStackTrace(int /* max_depth */, + int /* skip_count */) + GTEST_LOCK_EXCLUDED_(mutex_) { + return ""; +} + +void OsStackTraceGetter::UponLeavingGTest() + GTEST_LOCK_EXCLUDED_(mutex_) { +} + +const char* const +OsStackTraceGetter::kElidedFramesMarker = + "... " GTEST_NAME_ " internal frames ..."; + +// A helper class that creates the premature-exit file in its +// constructor and deletes the file in its destructor. 
+class ScopedPrematureExitFile { + public: + explicit ScopedPrematureExitFile(const char* premature_exit_filepath) + : premature_exit_filepath_(premature_exit_filepath) { + // If a path to the premature-exit file is specified... + if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') { + // create the file with a single "0" character in it. I/O + // errors are ignored as there's nothing better we can do and we + // don't want to fail the test because of this. + FILE* pfile = posix::FOpen(premature_exit_filepath, "w"); + fwrite("0", 1, 1, pfile); + fclose(pfile); + } + } + + ~ScopedPrematureExitFile() { + if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') { + remove(premature_exit_filepath_); + } + } + + private: + const char* const premature_exit_filepath_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile); +}; + +} // namespace internal + +// class TestEventListeners + +TestEventListeners::TestEventListeners() + : repeater_(new internal::TestEventRepeater()), + default_result_printer_(NULL), + default_xml_generator_(NULL) { +} + +TestEventListeners::~TestEventListeners() { delete repeater_; } + +// Returns the standard listener responsible for the default console +// output. Can be removed from the listeners list to shut down default +// console output. Note that removing this object from the listener list +// with Release transfers its ownership to the user. +void TestEventListeners::Append(TestEventListener* listener) { + repeater_->Append(listener); +} + +// Removes the given event listener from the list and returns it. It then +// becomes the caller's responsibility to delete the listener. Returns +// NULL if the listener is not found in the list. 
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) { + if (listener == default_result_printer_) + default_result_printer_ = NULL; + else if (listener == default_xml_generator_) + default_xml_generator_ = NULL; + return repeater_->Release(listener); +} + +// Returns repeater that broadcasts the TestEventListener events to all +// subscribers. +TestEventListener* TestEventListeners::repeater() { return repeater_; } + +// Sets the default_result_printer attribute to the provided listener. +// The listener is also added to the listener list and previous +// default_result_printer is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) { + if (default_result_printer_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_result_printer_); + default_result_printer_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Sets the default_xml_generator attribute to the provided listener. The +// listener is also added to the listener list and previous +// default_xml_generator is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) { + if (default_xml_generator_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_xml_generator_); + default_xml_generator_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Controls whether events will be forwarded by the repeater to the +// listeners in the list. 
+bool TestEventListeners::EventForwardingEnabled() const { + return repeater_->forwarding_enabled(); +} + +void TestEventListeners::SuppressEventForwarding() { + repeater_->set_forwarding_enabled(false); +} + +// class UnitTest + +// Gets the singleton UnitTest object. The first time this method is +// called, a UnitTest object is constructed and returned. Consecutive +// calls will return the same object. +// +// We don't protect this under mutex_ as a user is not supposed to +// call this before main() starts, from which point on the return +// value will never change. +UnitTest* UnitTest::GetInstance() { + // When compiled with MSVC 7.1 in optimized mode, destroying the + // UnitTest object upon exiting the program messes up the exit code, + // causing successful tests to appear failed. We have to use a + // different implementation in this case to bypass the compiler bug. + // This implementation makes the compiler happy, at the cost of + // leaking the UnitTest object. + + // CodeGear C++Builder insists on a public destructor for the + // default implementation. Use this implementation to keep good OO + // design with private destructor. + +#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) + static UnitTest* const instance = new UnitTest; + return instance; +#else + static UnitTest instance; + return &instance; +#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) +} + +// Gets the number of successful test cases. +int UnitTest::successful_test_case_count() const { + return impl()->successful_test_case_count(); +} + +// Gets the number of failed test cases. +int UnitTest::failed_test_case_count() const { + return impl()->failed_test_case_count(); +} + +// Gets the number of all test cases. +int UnitTest::total_test_case_count() const { + return impl()->total_test_case_count(); +} + +// Gets the number of all test cases that contain at least one test +// that should run. 
+int UnitTest::test_case_to_run_count() const { + return impl()->test_case_to_run_count(); +} + +// Gets the number of successful tests. +int UnitTest::successful_test_count() const { + return impl()->successful_test_count(); +} + +// Gets the number of failed tests. +int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTest::reportable_disabled_test_count() const { + return impl()->reportable_disabled_test_count(); +} + +// Gets the number of disabled tests. +int UnitTest::disabled_test_count() const { + return impl()->disabled_test_count(); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTest::reportable_test_count() const { + return impl()->reportable_test_count(); +} + +// Gets the number of all tests. +int UnitTest::total_test_count() const { return impl()->total_test_count(); } + +// Gets the number of tests that should run. +int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } + +// Gets the time of the test program start, in ms from the start of the +// UNIX epoch. +internal::TimeInMillis UnitTest::start_timestamp() const { + return impl()->start_timestamp(); +} + +// Gets the elapsed time, in milliseconds. +internal::TimeInMillis UnitTest::elapsed_time() const { + return impl()->elapsed_time(); +} + +// Returns true iff the unit test passed (i.e. all test cases passed). +bool UnitTest::Passed() const { return impl()->Passed(); } + +// Returns true iff the unit test failed (i.e. some test case failed +// or something outside of all tests failed). +bool UnitTest::Failed() const { return impl()->Failed(); } + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. 
+const TestCase* UnitTest::GetTestCase(int i) const { + return impl()->GetTestCase(i); +} + +// Returns the TestResult containing information on test failures and +// properties logged outside of individual test cases. +const TestResult& UnitTest::ad_hoc_test_result() const { + return *impl()->ad_hoc_test_result(); +} + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +TestCase* UnitTest::GetMutableTestCase(int i) { + return impl()->GetMutableTestCase(i); +} + +// Returns the list of event listeners that can be used to track events +// inside Google Test. +TestEventListeners& UnitTest::listeners() { + return *impl()->listeners(); +} + +// Registers and returns a global test environment. When a test +// program is run, all global test environments will be set-up in the +// order they were registered. After all tests in the program have +// finished, all global test environments will be torn-down in the +// *reverse* order they were registered. +// +// The UnitTest object takes ownership of the given environment. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +Environment* UnitTest::AddEnvironment(Environment* env) { + if (env == NULL) { + return NULL; + } + + impl_->environments().push_back(env); + return env; +} + +// Adds a TestPartResult to the current TestResult object. All Google Test +// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call +// this to report their results. The user code should use the +// assertion macros instead of calling this directly. 
+void UnitTest::AddTestPartResult( + TestPartResult::Type result_type, + const char* file_name, + int line_number, + const std::string& message, + const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) { + Message msg; + msg << message; + + internal::MutexLock lock(&mutex_); + if (impl_->gtest_trace_stack().size() > 0) { + msg << "\n" << GTEST_NAME_ << " trace:"; + + for (int i = static_cast(impl_->gtest_trace_stack().size()); + i > 0; --i) { + const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1]; + msg << "\n" << internal::FormatFileLocation(trace.file, trace.line) + << " " << trace.message; + } + } + + if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) { + msg << internal::kStackTraceMarker << os_stack_trace; + } + + const TestPartResult result = + TestPartResult(result_type, file_name, line_number, + msg.GetString().c_str()); + impl_->GetTestPartResultReporterForCurrentThread()-> + ReportTestPartResult(result); + + if (result_type != TestPartResult::kSuccess) { + // gtest_break_on_failure takes precedence over + // gtest_throw_on_failure. This allows a user to set the latter + // in the code (perhaps in order to use Google Test assertions + // with another testing framework) and specify the former on the + // command line for debugging. + if (GTEST_FLAG(break_on_failure)) { +#if GTEST_OS_WINDOWS + // Using DebugBreak on Windows allows gtest to still break into a debugger + // when a failure happens and both the --gtest_break_on_failure and + // the --gtest_catch_exceptions flags are specified. + DebugBreak(); +#else + // Dereference NULL through a volatile pointer to prevent the compiler + // from removing. We use this rather than abort() or __builtin_trap() for + // portability: Symbian doesn't implement abort() well, and some debuggers + // don't correctly trap abort(). 
+ *static_cast(NULL) = 1; +#endif // GTEST_OS_WINDOWS + } else if (GTEST_FLAG(throw_on_failure)) { +#if GTEST_HAS_EXCEPTIONS + throw internal::GoogleTestFailureException(result); +#else + // We cannot call abort() as it generates a pop-up in debug mode + // that cannot be suppressed in VC 7.1 or below. + exit(1); +#endif + } + } +} + +// Adds a TestProperty to the current TestResult object when invoked from +// inside a test, to current TestCase's ad_hoc_test_result_ when invoked +// from SetUpTestCase or TearDownTestCase, or to the global property set +// when invoked elsewhere. If the result already contains a property with +// the same key, the value will be updated. +void UnitTest::RecordProperty(const std::string& key, + const std::string& value) { + impl_->RecordProperty(TestProperty(key, value)); +} + +// Runs all tests in this UnitTest object and prints the result. +// Returns 0 if successful, or 1 otherwise. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +int UnitTest::Run() { + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Google Test implements this protocol for catching that a test + // program exits before returning control to Google Test: + // + // 1. Upon start, Google Test creates a file whose absolute path + // is specified by the environment variable + // TEST_PREMATURE_EXIT_FILE. + // 2. When Google Test has finished its work, it deletes the file. + // + // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before + // running a Google-Test-based test program and check the existence + // of the file at the end of the test execution to see if it has + // exited prematurely. + + // If we are in the child process of a death test, don't + // create/delete the premature exit file, as doing so is unnecessary + // and will confuse the parent process. Otherwise, create/delete + // the file upon entering/leaving this function. 
If the program + // somehow exits before this function has a chance to return, the + // premature-exit file will be left undeleted, causing a test runner + // that understands the premature-exit-file protocol to report the + // test as having failed. + const internal::ScopedPrematureExitFile premature_exit_file( + in_death_test_child_process ? + NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE")); + + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be + // used for the duration of the program. + impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); + +#if GTEST_HAS_SEH + // Either the user wants Google Test to catch exceptions thrown by the + // tests or this is executing in the context of death test child + // process. In either case the user does not want to see pop-up dialogs + // about crashes - they are expected. + if (impl()->catch_exceptions() || in_death_test_child_process) { +# if !GTEST_OS_WINDOWS_MOBILE + // SetErrorMode doesn't exist on CE. + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); +# endif // !GTEST_OS_WINDOWS_MOBILE + +# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE + // Death test children can be terminated with _abort(). On Windows, + // _abort() can show a dialog with a warning message. This forces the + // abort message to go to stderr instead. + _set_error_mode(_OUT_TO_STDERR); +# endif + +# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program. We need to suppress + // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement + // executed. Google Test will notify the user of any unexpected + // failure via stderr. + // + // VC++ doesn't define _set_abort_behavior() prior to the version 8.0. 
+ // Users of prior VC versions shall suffer the agony and pain of + // clicking through the countless debug dialogs. + // TODO(vladl@google.com): find a way to suppress the abort dialog() in the + // debug mode when compiled with VC 7.1 or lower. + if (!GTEST_FLAG(break_on_failure)) + _set_abort_behavior( + 0x0, // Clear the following flags: + _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. +# endif + } +#endif // GTEST_HAS_SEH + + return internal::HandleExceptionsInMethodIfSupported( + impl(), + &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") ? 0 : 1; +} + +// Returns the working directory when the first TEST() or TEST_F() was +// executed. +const char* UnitTest::original_working_dir() const { + return impl_->original_working_dir_.c_str(); +} + +// Returns the TestCase object for the test that's currently running, +// or NULL if no test is running. +const TestCase* UnitTest::current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_case(); +} + +// Returns the TestInfo object for the test that's currently running, +// or NULL if no test is running. +const TestInfo* UnitTest::current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_info(); +} + +// Returns the random seed used at the start of the current test run. +int UnitTest::random_seed() const { return impl_->random_seed(); } + +#if GTEST_HAS_PARAM_TEST +// Returns ParameterizedTestCaseRegistry object used to keep track of +// value-parameterized tests and instantiate and register them. +internal::ParameterizedTestCaseRegistry& + UnitTest::parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_) { + return impl_->parameterized_test_registry(); +} +#endif // GTEST_HAS_PARAM_TEST + +// Creates an empty UnitTest. 
+UnitTest::UnitTest() { + impl_ = new internal::UnitTestImpl(this); +} + +// Destructor of UnitTest. +UnitTest::~UnitTest() { + delete impl_; +} + +// Pushes a trace defined by SCOPED_TRACE() on to the per-thread +// Google Test trace stack. +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().push_back(trace); +} + +// Pops a trace from the per-thread Google Test trace stack. +void UnitTest::PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().pop_back(); +} + +namespace internal { + +UnitTestImpl::UnitTestImpl(UnitTest* parent) + : parent_(parent), +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4355) // Temporarily disables warning 4355 + // (using this in initializer). + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +# pragma warning(pop) // Restores the warning state again. +#else + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +#endif // _MSC_VER + global_test_part_result_repoter_( + &default_global_test_part_result_reporter_), + per_thread_test_part_result_reporter_( + &default_per_thread_test_part_result_reporter_), +#if GTEST_HAS_PARAM_TEST + parameterized_test_registry_(), + parameterized_tests_registered_(false), +#endif // GTEST_HAS_PARAM_TEST + last_death_test_case_(-1), + current_test_case_(NULL), + current_test_info_(NULL), + ad_hoc_test_result_(), + os_stack_trace_getter_(NULL), + post_flag_parse_init_performed_(false), + random_seed_(0), // Will be overridden by the flag before first use. + random_(0), // Will be reseeded before first use. 
+ start_timestamp_(0), + elapsed_time_(0), +#if GTEST_HAS_DEATH_TEST + death_test_factory_(new DefaultDeathTestFactory), +#endif + // Will be overridden by the flag before first use. + catch_exceptions_(false) { + listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter); +} + +UnitTestImpl::~UnitTestImpl() { + // Deletes every TestCase. + ForEach(test_cases_, internal::Delete); + + // Deletes every Environment. + ForEach(environments_, internal::Delete); + + delete os_stack_trace_getter_; +} + +// Adds a TestProperty to the current TestResult object when invoked in a +// context of a test, to current test case's ad_hoc_test_result when invoke +// from SetUpTestCase/TearDownTestCase, or to the global property set +// otherwise. If the result already contains a property with the same key, +// the value will be updated. +void UnitTestImpl::RecordProperty(const TestProperty& test_property) { + std::string xml_element; + TestResult* test_result; // TestResult appropriate for property recording. + + if (current_test_info_ != NULL) { + xml_element = "testcase"; + test_result = &(current_test_info_->result_); + } else if (current_test_case_ != NULL) { + xml_element = "testsuite"; + test_result = &(current_test_case_->ad_hoc_test_result_); + } else { + xml_element = "testsuites"; + test_result = &ad_hoc_test_result_; + } + test_result->RecordProperty(xml_element, test_property); +} + +#if GTEST_HAS_DEATH_TEST +// Disables event forwarding if the control is currently in a death test +// subprocess. Must not be called before InitGoogleTest. +void UnitTestImpl::SuppressTestEventsIfInSubprocess() { + if (internal_run_death_test_flag_.get() != NULL) + listeners()->SuppressEventForwarding(); +} +#endif // GTEST_HAS_DEATH_TEST + +// Initializes event listeners performing XML output as specified by +// UnitTestOptions. Must not be called before InitGoogleTest. 
+void UnitTestImpl::ConfigureXmlOutput() {
+  const std::string& output_format = UnitTestOptions::GetOutputFormat();
+  if (output_format == "xml") {
+    listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+        UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+  } else if (output_format != "") {
+    printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+           output_format.c_str());
+    fflush(stdout);
+  }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in string form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+  const std::string& target = GTEST_FLAG(stream_result_to);
+  if (!target.empty()) {
+    const size_t pos = target.find(':');
+    if (pos != std::string::npos) {
+      listeners()->Append(new StreamingListener(target.substr(0, pos),
+                                                target.substr(pos+1)));
+    } else {
+      printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+             target.c_str());
+      fflush(stdout);
+    }
+  }
+}
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to
+// ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest
+// this function is also called from RunAllTests.  Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+  // Ensures that this function does not execute more than once.
+  if (!post_flag_parse_init_performed_) {
+    post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+    InitDeathTestSubprocessControlInfo();
+    SuppressTestEventsIfInSubprocess();
+#endif  // GTEST_HAS_DEATH_TEST
+
+    // Registers parameterized tests. This makes parameterized tests
+    // available to the UnitTest reflection API without running
+    // RUN_ALL_TESTS.
+    RegisterParameterizedTests();
+
+    // Configures listeners for XML output. This makes it possible for users
+    // to shut down the default XML output before invoking RUN_ALL_TESTS.
+    ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+    // Configures listeners for streaming test results to the specified server.
+    ConfigureStreamingOutput();
+#endif  // GTEST_CAN_STREAM_RESULTS_
+  }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only.  We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+  // Constructor.
+  explicit TestCaseNameIs(const std::string& name)
+      : name_(name) {}
+
+  // Returns true iff the name of test_case matches name_.
+  bool operator()(const TestCase* test_case) const {
+    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+  }
+
+ private:
+  std::string name_;
+};
+
+// Finds and returns a TestCase with the given name.  If one doesn't
+// exist, creates one and returns it.  It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+//   test_case_name: name of the test case
+//   type_param:     the name of the test case's type parameter, or NULL if
+//                   this is not a typed or a type-parameterized test case.
+//   set_up_tc:      pointer to the function that sets up the test case
+//   tear_down_tc:   pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+                                    const char* type_param,
+                                    Test::SetUpTestCaseFunc set_up_tc,
+                                    Test::TearDownTestCaseFunc tear_down_tc) {
+  // Can we find a TestCase with the given name?
+  // NOTE: the <TestCase*> template argument was lost to entity-mangling in
+  // the snapshot; restored here to match upstream gtest.
+  const std::vector<TestCase*>::const_iterator test_case =
+      std::find_if(test_cases_.begin(), test_cases_.end(),
+                   TestCaseNameIs(test_case_name));
+
+  if (test_case != test_cases_.end())
+    return *test_case;
+
+  // No.  Let's create one.
+  TestCase* const new_test_case =
+      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+  // Is this a death test case?
+  if (internal::UnitTestOptions::MatchesFilter(test_case_name,
+                                               kDeathTestCaseFilter)) {
+    // Yes.  Inserts the test case after the last death test case
+    // defined so far.  This only works when the test cases haven't
+    // been shuffled.  Otherwise we may end up running a death test
+    // after a non-death test.
+    ++last_death_test_case_;
+    test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+                       new_test_case);
+  } else {
+    // No.  Appends to the end of the list.
+    test_cases_.push_back(new_test_case);
+  }
+
+  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+  return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment.  They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful.  If any exception is
+// thrown during a test, the test is considered to be failed, but the
+// rest of the tests will still be run.
+//
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that
+// parameterized tests are ready to be counted and run.
+bool UnitTestImpl::RunAllTests() {
+  // Makes sure InitGoogleTest() was called.
+  if (!GTestIsInitialized()) {
+    printf("%s",
+           "\nThis test program did NOT call ::testing::InitGoogleTest "
+           "before calling RUN_ALL_TESTS().  Please fix it.\n");
+    return false;
+  }
+
+  // Do not run any test if the --help flag was specified.
+  if (g_help_flag)
+    return true;
+
+  // Repeats the call to the post-flag parsing initialization in case the
+  // user didn't call InitGoogleTest.
+  PostFlagParsingInit();
+
+  // Even if sharding is not on, test runners may want to use the
+  // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+  // protocol.
+  internal::WriteToShardStatusFileIfNeeded();
+
+  // True iff we are in a subprocess for running a thread-safe-style
+  // death test.
+  bool in_subprocess_for_death_test = false;
+
+#if GTEST_HAS_DEATH_TEST
+  in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
+#endif  // GTEST_HAS_DEATH_TEST
+
+  const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+                                        in_subprocess_for_death_test);
+
+  // Compares the full test names with the filter to decide which
+  // tests to run.
+  const bool has_tests_to_run = FilterTests(should_shard
+                                              ? HONOR_SHARDING_PROTOCOL
+                                              : IGNORE_SHARDING_PROTOCOL) > 0;
+
+  // Lists the tests and exits if the --gtest_list_tests flag was specified.
+  if (GTEST_FLAG(list_tests)) {
+    // This must be called *after* FilterTests() has been called.
+    ListTestsMatchingFilter();
+    return true;
+  }
+
+  random_seed_ = GTEST_FLAG(shuffle) ?
+      GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
+  // True iff at least one test has failed.
+  bool failed = false;
+
+  TestEventListener* repeater = listeners()->repeater();
+
+  start_timestamp_ = GetTimeInMillis();
+  repeater->OnTestProgramStart(*parent_);
+
+  // How many times to repeat the tests?  We don't want to repeat them
+  // when we are inside the subprocess of a death test.
+  const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
+  // Repeats forever if the repeat count is negative.
+  const bool forever = repeat < 0;
+  for (int i = 0; forever || i != repeat; i++) {
+    // We want to preserve failures generated by ad-hoc test
+    // assertions executed before RUN_ALL_TESTS().
+    ClearNonAdHocTestResult();
+
+    const TimeInMillis start = GetTimeInMillis();
+
+    // Shuffles test cases and tests if requested.
+    if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+      random()->Reseed(random_seed_);
+      // This should be done before calling OnTestIterationStart(),
+      // such that a test event listener can see the actual test order
+      // in the event.
+      ShuffleTests();
+    }
+
+    // Tells the unit test event listeners that the tests are about to start.
+    repeater->OnTestIterationStart(*parent_, i);
+
+    // Runs each test case if there is at least one test to run.
+    if (has_tests_to_run) {
+      // Sets up all environments beforehand.
+      repeater->OnEnvironmentsSetUpStart(*parent_);
+      ForEach(environments_, SetUpEnvironment);
+      repeater->OnEnvironmentsSetUpEnd(*parent_);
+
+      // Runs the tests only if there was no fatal failure during global
+      // set-up.
+      if (!Test::HasFatalFailure()) {
+        for (int test_index = 0; test_index < total_test_case_count();
+             test_index++) {
+          GetMutableTestCase(test_index)->Run();
+        }
+      }
+
+      // Tears down all environments in reverse order afterwards.
+      repeater->OnEnvironmentsTearDownStart(*parent_);
+      std::for_each(environments_.rbegin(), environments_.rend(),
+                    TearDownEnvironment);
+      repeater->OnEnvironmentsTearDownEnd(*parent_);
+    }
+
+    elapsed_time_ = GetTimeInMillis() - start;
+
+    // Tells the unit test event listener that the tests have just finished.
+    repeater->OnTestIterationEnd(*parent_, i);
+
+    // Gets the result and clears it.
+    if (!Passed()) {
+      failed = true;
+    }
+
+    // Restores the original test order after the iteration.  This
+    // allows the user to quickly repro a failure that happens in the
+    // N-th iteration without repeating the first (N - 1) iterations.
+    // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+    // case the user somehow changes the value of the flag somewhere
+    // (it's always safe to unshuffle the tests).
+    UnshuffleTests();
+
+    if (GTEST_FLAG(shuffle)) {
+      // Picks a new random seed for each iteration.
+      random_seed_ = GetNextRandomSeed(random_seed_);
+    }
+  }
+
+  repeater->OnTestProgramEnd(*parent_);
+
+  return !failed;
+}
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+  const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+  if (test_shard_file != NULL) {
+    FILE* const file = posix::FOpen(test_shard_file, "w");
+    if (file == NULL) {
+      ColoredPrintf(COLOR_RED,
+                    "Could not write to the test shard status file \"%s\" "
+                    "specified by the %s environment variable.\n",
+                    test_shard_file, kTestShardStatusFile);
+      fflush(stdout);
+      exit(EXIT_FAILURE);
+    }
+    fclose(file);
+  }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env, + const char* shard_index_env, + bool in_subprocess_for_death_test) { + if (in_subprocess_for_death_test) { + return false; + } + + const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1); + const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1); + + if (total_shards == -1 && shard_index == -1) { + return false; + } else if (total_shards == -1 && shard_index != -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (total_shards != -1 && shard_index == -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (shard_index < 0 || shard_index >= total_shards) { + const Message msg = Message() + << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } + + return total_shards > 1; +} + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. 
+Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) {
+  const char* str_val = posix::GetEnv(var);
+  if (str_val == NULL) {
+    return default_val;
+  }
+
+  Int32 result;
+  if (!ParseInt32(Message() << "The value of environment variable " << var,
+                  str_val, &result)) {
+    // ParseInt32 has already printed the error message; just bail out.
+    exit(EXIT_FAILURE);
+  }
+  return result;
+}
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+  return (test_id % total_shards) == shard_index;
+}
+
+// Compares the name of each test with the user-specified filter to
+// decide whether the test should be run, then records the result in
+// each TestCase and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
+// Returns the number of tests that should run.
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+  // Sharding env vars are only consulted when honoring the protocol;
+  // otherwise -1 sentinels are used and sharding is skipped below.
+  const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+      Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+  const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+      Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+  // num_runnable_tests are the number of tests that will
+  // run across all shards (i.e., match filter and are not disabled).
+  // num_selected_tests are the number of tests to be run on
+  // this shard.
+  int num_runnable_tests = 0;
+  int num_selected_tests = 0;
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    TestCase* const test_case = test_cases_[i];
+    const std::string &test_case_name = test_case->name();
+    test_case->set_should_run(false);
+
+    for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+      TestInfo* const test_info = test_case->test_info_list()[j];
+      const std::string test_name(test_info->name());
+      // A test is disabled if test case name or test name matches
+      // kDisableTestFilter.
+      const bool is_disabled =
+          internal::UnitTestOptions::MatchesFilter(test_case_name,
+                                                   kDisableTestFilter) ||
+          internal::UnitTestOptions::MatchesFilter(test_name,
+                                                   kDisableTestFilter);
+      test_info->is_disabled_ = is_disabled;
+
+      const bool matches_filter =
+          internal::UnitTestOptions::FilterMatchesTest(test_case_name,
+                                                       test_name);
+      test_info->matches_filter_ = matches_filter;
+
+      const bool is_runnable =
+          (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+          matches_filter;
+
+      const bool is_selected = is_runnable &&
+          (shard_tests == IGNORE_SHARDING_PROTOCOL ||
+           ShouldRunTestOnShard(total_shards, shard_index,
+                                num_runnable_tests));
+
+      // bool converts to 0/1 here, so these lines accumulate counts.
+      num_runnable_tests += is_runnable;
+      num_selected_tests += is_selected;
+
+      test_info->should_run_ = is_selected;
+      test_case->set_should_run(test_case->should_run() || is_selected);
+    }
+  }
+  return num_selected_tests;
+}
+
+// Prints the given C-string on a single line by replacing all '\n'
+// characters with string "\\n". If the output takes more than
+// max_length characters, only prints the first max_length characters
+// and "...".
+static void PrintOnOneLine(const char* str, int max_length) {
+  if (str != NULL) {
+    for (int i = 0; *str != '\0'; ++str) {
+      if (i >= max_length) {
+        printf("...");
+        break;
+      }
+      if (*str == '\n') {
+        // A newline is echoed as the two characters "\n", hence i += 2.
+        printf("\\n");
+        i += 2;
+      } else {
+        printf("%c", *str);
+        ++i;
+      }
+    }
+  }
+}
+
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() { + // Print at most this many characters for each type/value parameter. + const int kMaxParamLength = 250; + + for (size_t i = 0; i < test_cases_.size(); i++) { + const TestCase* const test_case = test_cases_[i]; + bool printed_test_case_name = false; + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + const TestInfo* const test_info = + test_case->test_info_list()[j]; + if (test_info->matches_filter_) { + if (!printed_test_case_name) { + printed_test_case_name = true; + printf("%s.", test_case->name()); + if (test_case->type_param() != NULL) { + printf(" # %s = ", kTypeParamLabel); + // We print the type parameter on a single line to make + // the output easy to parse by a program. + PrintOnOneLine(test_case->type_param(), kMaxParamLength); + } + printf("\n"); + } + printf(" %s", test_info->name()); + if (test_info->value_param() != NULL) { + printf(" # %s = ", kValueParamLabel); + // We print the value parameter on a single line to make the + // output easy to parse by a program. + PrintOnOneLine(test_info->value_param(), kMaxParamLength); + } + printf("\n"); + } + } + } + fflush(stdout); +} + +// Sets the OS stack trace getter. +// +// Does nothing if the input and the current OS stack trace getter are +// the same; otherwise, deletes the old getter and makes the input the +// current getter. +void UnitTestImpl::set_os_stack_trace_getter( + OsStackTraceGetterInterface* getter) { + if (os_stack_trace_getter_ != getter) { + delete os_stack_trace_getter_; + os_stack_trace_getter_ = getter; + } +} + +// Returns the current OS stack trace getter if it is not NULL; +// otherwise, creates an OsStackTraceGetter, makes it the current +// getter, and returns it. 
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() { + if (os_stack_trace_getter_ == NULL) { + os_stack_trace_getter_ = new OsStackTraceGetter; + } + + return os_stack_trace_getter_; +} + +// Returns the TestResult for the test that's currently running, or +// the TestResult for the ad hoc test if no test is running. +TestResult* UnitTestImpl::current_test_result() { + return current_test_info_ ? + &(current_test_info_->result_) : &ad_hoc_test_result_; +} + +// Shuffles all test cases, and the tests within each test case, +// making sure that death tests are still run first. +void UnitTestImpl::ShuffleTests() { + // Shuffles the death test cases. + ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_); + + // Shuffles the non-death test cases. + ShuffleRange(random(), last_death_test_case_ + 1, + static_cast(test_cases_.size()), &test_case_indices_); + + // Shuffles the tests inside each test case. + for (size_t i = 0; i < test_cases_.size(); i++) { + test_cases_[i]->ShuffleTests(random()); + } +} + +// Restores the test cases and tests to their order before the first shuffle. +void UnitTestImpl::UnshuffleTests() { + for (size_t i = 0; i < test_cases_.size(); i++) { + // Unshuffles the tests in each test case. + test_cases_[i]->UnshuffleTests(); + // Resets the index of each test case. + test_case_indices_[i] = static_cast(i); + } +} + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. 
+std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, + int skip_count) { + // We pass skip_count + 1 to skip this wrapper function in addition + // to what the user really wants to skip. + return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); +} + +// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to +// suppress unreachable code warnings. +namespace { +class ClassUniqueToAlwaysTrue {}; +} + +bool IsTrue(bool condition) { return condition; } + +bool AlwaysTrue() { +#if GTEST_HAS_EXCEPTIONS + // This condition is always false so AlwaysTrue() never actually throws, + // but it makes the compiler think that it may throw. + if (IsTrue(false)) + throw ClassUniqueToAlwaysTrue(); +#endif // GTEST_HAS_EXCEPTIONS + return true; +} + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +bool SkipPrefix(const char* prefix, const char** pstr) { + const size_t prefix_len = strlen(prefix); + if (strncmp(*pstr, prefix, prefix_len) == 0) { + *pstr += prefix_len; + return true; + } + return false; +} + +// Parses a string as a command line flag. The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or NULL if the parsing failed. +const char* ParseFlagValue(const char* str, + const char* flag, + bool def_optional) { + // str and flag must not be NULL. + if (str == NULL || flag == NULL) return NULL; + + // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. + const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag; + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL; + + // Skips the flag name. 
+ const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) { + return flag_end; + } + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return NULL; + + // Returns the string after "=". + return flag_end + 1; +} + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true as long as it does +// not start with '0', 'f', or 'F'. +// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Converts the string value to a bool. + *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); + return true; +} + +// Parses a string for an Int32 flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseInt32Flag(const char* str, const char* flag, Int32* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + return ParseInt32(Message() << "The value of flag --" << flag, + value_str, value); +} + +// Parses a string for a string flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. 
On failure, returns false without changing *value. +bool ParseStringFlag(const char* str, const char* flag, std::string* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + *value = value_str; + return true; +} + +// Determines whether a string has a prefix that Google Test uses for its +// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_. +// If Google Test detects that a command line flag has its prefix but is not +// recognized, it will print its help message. Flags starting with +// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test +// internal flags and do not trigger the help message. +static bool HasGoogleTestFlagPrefix(const char* str) { + return (SkipPrefix("--", &str) || + SkipPrefix("-", &str) || + SkipPrefix("/", &str)) && + !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) && + (SkipPrefix(GTEST_FLAG_PREFIX_, &str) || + SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str)); +} + +// Prints a string containing code-encoded text. The following escape +// sequences can be used in the string to control the text color: +// +// @@ prints a single '@' character. +// @R changes the color to red. +// @G changes the color to green. +// @Y changes the color to yellow. +// @D changes to the default terminal text color. +// +// TODO(wan@google.com): Write tests for this once we add stdout +// capturing to Google Test. +static void PrintColorEncoded(const char* str) { + GTestColor color = COLOR_DEFAULT; // The current color. + + // Conceptually, we split the string into segments divided by escape + // sequences. Then we print one segment at a time. At the end of + // each iteration, the str pointer advances to the beginning of the + // next segment. 
+ for (;;) { + const char* p = strchr(str, '@'); + if (p == NULL) { + ColoredPrintf(color, "%s", str); + return; + } + + ColoredPrintf(color, "%s", std::string(str, p).c_str()); + + const char ch = p[1]; + str = p + 2; + if (ch == '@') { + ColoredPrintf(color, "@"); + } else if (ch == 'D') { + color = COLOR_DEFAULT; + } else if (ch == 'R') { + color = COLOR_RED; + } else if (ch == 'G') { + color = COLOR_GREEN; + } else if (ch == 'Y') { + color = COLOR_YELLOW; + } else { + --str; + } + } +} + +static const char kColorEncodedHelpMessage[] = +"This program contains tests written using " GTEST_NAME_ ". You can use the\n" +"following command line flags to control its behavior:\n" +"\n" +"Test Selection:\n" +" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n" +" List the names of all tests instead of running them. The name of\n" +" TEST(Foo, Bar) is \"Foo.Bar\".\n" +" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS" + "[@G-@YNEGATIVE_PATTERNS]@D\n" +" Run only the tests whose name matches one of the positive patterns but\n" +" none of the negative patterns. '?' matches any single character; '*'\n" +" matches any substring; ':' separates two patterns.\n" +" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n" +" Run all disabled tests too.\n" +"\n" +"Test Execution:\n" +" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n" +" Run the tests repeatedly; use a negative count to repeat forever.\n" +" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n" +" Randomize tests' orders on every iteration.\n" +" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n" +" Random number seed to use for shuffling test orders (between 1 and\n" +" 99999, or 0 to use a seed based on the current time).\n" +"\n" +"Test Output:\n" +" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n" +" Enable/disable colored output. 
The default is @Gauto@D.\n" +" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n" +" Don't print the elapsed time of each test.\n" +" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G" + GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n" +" Generate an XML report in the given directory or with the given file\n" +" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n" +#if GTEST_CAN_STREAM_RESULTS_ +" @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n" +" Stream test results to the given server.\n" +#endif // GTEST_CAN_STREAM_RESULTS_ +"\n" +"Assertion Behavior:\n" +#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n" +" Set the default death test style.\n" +#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS +" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n" +" Turn assertion failures into debugger break-points.\n" +" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n" +" Turn assertion failures into C++ exceptions.\n" +" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n" +" Do not report exceptions as test failures. Instead, allow them\n" +" to crash the program or throw a pop-up (on Windows).\n" +"\n" +"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set " + "the corresponding\n" +"environment variable of a flag (all letters in upper-case). For example, to\n" +"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_ + "color=no@D or set\n" +"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n" +"\n" +"For more information, please read the " GTEST_NAME_ " documentation at\n" +"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n" +"(not one in your own code or tests), please report it to\n" +"@G<" GTEST_DEV_EMAIL_ ">@D.\n"; + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. The type parameter CharType can be +// instantiated to either char or wchar_t. 
+template +void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) { + for (int i = 1; i < *argc; i++) { + const std::string arg_string = StreamableToString(argv[i]); + const char* const arg = arg_string.c_str(); + + using internal::ParseBoolFlag; + using internal::ParseInt32Flag; + using internal::ParseStringFlag; + + // Do we see a Google Test flag? + if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag, + >EST_FLAG(also_run_disabled_tests)) || + ParseBoolFlag(arg, kBreakOnFailureFlag, + >EST_FLAG(break_on_failure)) || + ParseBoolFlag(arg, kCatchExceptionsFlag, + >EST_FLAG(catch_exceptions)) || + ParseStringFlag(arg, kColorFlag, >EST_FLAG(color)) || + ParseStringFlag(arg, kDeathTestStyleFlag, + >EST_FLAG(death_test_style)) || + ParseBoolFlag(arg, kDeathTestUseFork, + >EST_FLAG(death_test_use_fork)) || + ParseStringFlag(arg, kFilterFlag, >EST_FLAG(filter)) || + ParseStringFlag(arg, kInternalRunDeathTestFlag, + >EST_FLAG(internal_run_death_test)) || + ParseBoolFlag(arg, kListTestsFlag, >EST_FLAG(list_tests)) || + ParseStringFlag(arg, kOutputFlag, >EST_FLAG(output)) || + ParseBoolFlag(arg, kPrintTimeFlag, >EST_FLAG(print_time)) || + ParseInt32Flag(arg, kRandomSeedFlag, >EST_FLAG(random_seed)) || + ParseInt32Flag(arg, kRepeatFlag, >EST_FLAG(repeat)) || + ParseBoolFlag(arg, kShuffleFlag, >EST_FLAG(shuffle)) || + ParseInt32Flag(arg, kStackTraceDepthFlag, + >EST_FLAG(stack_trace_depth)) || + ParseStringFlag(arg, kStreamResultToFlag, + >EST_FLAG(stream_result_to)) || + ParseBoolFlag(arg, kThrowOnFailureFlag, + >EST_FLAG(throw_on_failure)) + ) { + // Yes. Shift the remainder of the argv list left by one. Note + // that argv has (*argc + 1) elements, the last one always being + // NULL. The following loop moves the trailing NULL element as + // well. + for (int j = i; j != *argc; j++) { + argv[j] = argv[j + 1]; + } + + // Decrements the argument count. + (*argc)--; + + // We also need to decrement the iterator as we just removed + // an element. 
+ i--; + } else if (arg_string == "--help" || arg_string == "-h" || + arg_string == "-?" || arg_string == "/?" || + HasGoogleTestFlagPrefix(arg)) { + // Both help flag and unrecognized Google Test flags (excluding + // internal ones) trigger help display. + g_help_flag = true; + } + } + + if (g_help_flag) { + // We print the help here instead of in RUN_ALL_TESTS(), as the + // latter may not be called at all if the user is using Google + // Test with another testing framework. + PrintColorEncoded(kColorEncodedHelpMessage); + } +} + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. +void ParseGoogleTestFlagsOnly(int* argc, char** argv) { + ParseGoogleTestFlagsOnlyImpl(argc, argv); +} +void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) { + ParseGoogleTestFlagsOnlyImpl(argc, argv); +} + +// The internal implementation of InitGoogleTest(). +// +// The type parameter CharType can be instantiated to either char or +// wchar_t. +template +void InitGoogleTestImpl(int* argc, CharType** argv) { + g_init_gtest_count++; + + // We don't want to run the initialization code twice. + if (g_init_gtest_count != 1) return; + + if (*argc <= 0) return; + + internal::g_executable_path = internal::StreamableToString(argv[0]); + +#if GTEST_HAS_DEATH_TEST + + g_argvs.clear(); + for (int i = 0; i != *argc; i++) { + g_argvs.push_back(StreamableToString(argv[i])); + } + +#endif // GTEST_HAS_DEATH_TEST + + ParseGoogleTestFlagsOnly(argc, argv); + GetUnitTestImpl()->PostFlagParsingInit(); +} + +} // namespace internal + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. 
+// +// Calling the function for the second time has no user-visible effect. +void InitGoogleTest(int* argc, char** argv) { + internal::InitGoogleTestImpl(argc, argv); +} + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +void InitGoogleTest(int* argc, wchar_t** argv) { + internal::InitGoogleTestImpl(argc, argv); +} + +} // namespace testing +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev) +// +// This file implements death tests. + + +#if GTEST_HAS_DEATH_TEST + +# if GTEST_OS_MAC +# include +# endif // GTEST_OS_MAC + +# include +# include +# include + +# if GTEST_OS_LINUX +# include +# endif // GTEST_OS_LINUX + +# include + +# if GTEST_OS_WINDOWS +# include +# else +# include +# include +# endif // GTEST_OS_WINDOWS + +# if GTEST_OS_QNX +# include +# endif // GTEST_OS_QNX + +#endif // GTEST_HAS_DEATH_TEST + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { + +// Constants. + +// The default death test style. +static const char kDefaultDeathTestStyle[] = "fast"; + +GTEST_DEFINE_string_( + death_test_style, + internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle), + "Indicates how to run a death test in a forked child process: " + "\"threadsafe\" (child process re-executes the test binary " + "from the beginning, running only the specific death test) or " + "\"fast\" (child process runs the death test immediately " + "after forking)."); + +GTEST_DEFINE_bool_( + death_test_use_fork, + internal::BoolFromGTestEnv("death_test_use_fork", false), + "Instructs to use fork()/_exit() instead of clone() in death tests. " + "Ignored and always uses fork() on POSIX systems where clone() is not " + "implemented. Useful when running under valgrind or similar tools if " + "those do not support clone(). Valgrind 3.3.1 will just fail if " + "it sees an unsupported combination of clone() flags. " + "It is not recommended to use this flag w/o valgrind though it will " + "work in 99% of the cases. 
Once valgrind is fixed, this flag will " + "most likely be removed."); + +namespace internal { +GTEST_DEFINE_string_( + internal_run_death_test, "", + "Indicates the file, line number, temporal index of " + "the single death test to run, and a file descriptor to " + "which a success code may be sent, all separated by " + "the '|' characters. This flag is specified if and only if the current " + "process is a sub-process launched for running a thread-safe " + "death test. FOR INTERNAL USE ONLY."); +} // namespace internal + +#if GTEST_HAS_DEATH_TEST + +namespace internal { + +// Valid only for fast death tests. Indicates the code is running in the +// child process of a fast style death test. +static bool g_in_fast_death_test_child = false; + +// Returns a Boolean value indicating whether the caller is currently +// executing in the context of the death test child process. Tools such as +// Valgrind heap checkers may need this to modify their behavior in death +// tests. IMPORTANT: This is an internal utility. Using it may break the +// implementation of death tests. User code MUST NOT use it. +bool InDeathTestChild() { +# if GTEST_OS_WINDOWS + + // On Windows, death tests are thread-safe regardless of the value of the + // death_test_style flag. + return !GTEST_FLAG(internal_run_death_test).empty(); + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") + return !GTEST_FLAG(internal_run_death_test).empty(); + else + return g_in_fast_death_test_child; +#endif +} + +} // namespace internal + +// ExitedWithCode constructor. +ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { +} + +// ExitedWithCode function-call operator. +bool ExitedWithCode::operator()(int exit_status) const { +# if GTEST_OS_WINDOWS + + return exit_status == exit_code_; + +# else + + return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_; + +# endif // GTEST_OS_WINDOWS +} + +# if !GTEST_OS_WINDOWS +// KilledBySignal constructor. 
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) { +} + +// KilledBySignal function-call operator. +bool KilledBySignal::operator()(int exit_status) const { + return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_; +} +# endif // !GTEST_OS_WINDOWS + +namespace internal { + +// Utilities needed for death tests. + +// Generates a textual description of a given exit code, in the format +// specified by wait(2). +static std::string ExitSummary(int exit_code) { + Message m; + +# if GTEST_OS_WINDOWS + + m << "Exited with exit status " << exit_code; + +# else + + if (WIFEXITED(exit_code)) { + m << "Exited with exit status " << WEXITSTATUS(exit_code); + } else if (WIFSIGNALED(exit_code)) { + m << "Terminated by signal " << WTERMSIG(exit_code); + } +# ifdef WCOREDUMP + if (WCOREDUMP(exit_code)) { + m << " (core dumped)"; + } +# endif +# endif // GTEST_OS_WINDOWS + + return m.GetString(); +} + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +bool ExitedUnsuccessfully(int exit_status) { + return !ExitedWithCode(0)(exit_status); +} + +# if !GTEST_OS_WINDOWS +// Generates a textual failure message when a death test finds more than +// one thread running, or cannot determine the number of threads, prior +// to executing the given statement. It is the responsibility of the +// caller not to pass a thread_count of 1. +static std::string DeathTestThreadWarning(size_t thread_count) { + Message msg; + msg << "Death tests use fork(), which is unsafe particularly" + << " in a threaded context. For this test, " << GTEST_NAME_ << " "; + if (thread_count == 0) + msg << "couldn't detect the number of threads."; + else + msg << "detected " << thread_count << " threads."; + return msg.GetString(); +} +# endif // !GTEST_OS_WINDOWS + +// Flag characters for reporting a death test that did not die. 
+static const char kDeathTestLived = 'L'; +static const char kDeathTestReturned = 'R'; +static const char kDeathTestThrew = 'T'; +static const char kDeathTestInternalError = 'I'; + +// An enumeration describing all of the possible ways that a death test can +// conclude. DIED means that the process died while executing the test +// code; LIVED means that process lived beyond the end of the test code; +// RETURNED means that the test statement attempted to execute a return +// statement, which is not allowed; THREW means that the test statement +// returned control by throwing an exception. IN_PROGRESS means the test +// has not yet concluded. +// TODO(vladl@google.com): Unify names and possibly values for +// AbortReason, DeathTestOutcome, and flag characters above. +enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }; + +// Routine for aborting the program which is safe to call from an +// exec-style death test child process, in which case the error +// message is propagated back to the parent process. Otherwise, the +// message is simply printed to stderr. In either case, the program +// then exits with status 1. +void DeathTestAbort(const std::string& message) { + // On a POSIX system, this function may be called from a threadsafe-style + // death test child process, which operates on a very small stack. Use + // the heap for any additional non-minuscule memory requirements. + const InternalRunDeathTestFlag* const flag = + GetUnitTestImpl()->internal_run_death_test_flag(); + if (flag != NULL) { + FILE* parent = posix::FDOpen(flag->write_fd(), "w"); + fputc(kDeathTestInternalError, parent); + fprintf(parent, "%s", message.c_str()); + fflush(parent); + _exit(1); + } else { + fprintf(stderr, "%s", message.c_str()); + fflush(stderr); + posix::Abort(); + } +} + +// A replacement for CHECK that calls DeathTestAbort if the assertion +// fails. 
+# define GTEST_DEATH_TEST_CHECK_(expression) \ + do { \ + if (!::testing::internal::IsTrue(expression)) { \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for +// evaluating any system call that fulfills two conditions: it must return +// -1 on failure, and set errno to EINTR when it is interrupted and +// should be tried again. The macro expands to a loop that repeatedly +// evaluates the expression as long as it evaluates to -1 and sets +// errno to EINTR. If the expression evaluates to -1 but errno is +// something other than EINTR, DeathTestAbort is called. +# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ + do { \ + int gtest_retval; \ + do { \ + gtest_retval = (expression); \ + } while (gtest_retval == -1 && errno == EINTR); \ + if (gtest_retval == -1) { \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression + " != -1"); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// Returns the message describing the last system error in errno. +std::string GetLastErrnoDescription() { + return errno == 0 ? "" : posix::StrError(errno); +} + +// This is called from a death test parent process to read a failure +// message from the death test child process and log it with the FATAL +// severity. On Windows, the message is read from a pipe handle. On other +// platforms, it is read from a file descriptor. 
+static void FailFromInternalError(int fd) { + Message error; + char buffer[256]; + int num_read; + + do { + while ((num_read = posix::Read(fd, buffer, 255)) > 0) { + buffer[num_read] = '\0'; + error << buffer; + } + } while (num_read == -1 && errno == EINTR); + + if (num_read == 0) { + GTEST_LOG_(FATAL) << error.GetString(); + } else { + const int last_error = errno; + GTEST_LOG_(FATAL) << "Error while reading death test internal: " + << GetLastErrnoDescription() << " [" << last_error << "]"; + } +} + +// Death test constructor. Increments the running death test count +// for the current test. +DeathTest::DeathTest() { + TestInfo* const info = GetUnitTestImpl()->current_test_info(); + if (info == NULL) { + DeathTestAbort("Cannot run a death test outside of a TEST or " + "TEST_F construct"); + } +} + +// Creates and returns a death test by dispatching to the current +// death test factory. +bool DeathTest::Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) { + return GetUnitTestImpl()->death_test_factory()->Create( + statement, regex, file, line, test); +} + +const char* DeathTest::LastMessage() { + return last_death_test_message_.c_str(); +} + +void DeathTest::set_last_death_test_message(const std::string& message) { + last_death_test_message_ = message; +} + +std::string DeathTest::last_death_test_message_; + +// Provides cross platform implementation for some death functionality. +class DeathTestImpl : public DeathTest { + protected: + DeathTestImpl(const char* a_statement, const RE* a_regex) + : statement_(a_statement), + regex_(a_regex), + spawned_(false), + status_(-1), + outcome_(IN_PROGRESS), + read_fd_(-1), + write_fd_(-1) {} + + // read_fd_ is expected to be closed and cleared by a derived class. 
+ ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); } + + void Abort(AbortReason reason); + virtual bool Passed(bool status_ok); + + const char* statement() const { return statement_; } + const RE* regex() const { return regex_; } + bool spawned() const { return spawned_; } + void set_spawned(bool is_spawned) { spawned_ = is_spawned; } + int status() const { return status_; } + void set_status(int a_status) { status_ = a_status; } + DeathTestOutcome outcome() const { return outcome_; } + void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; } + int read_fd() const { return read_fd_; } + void set_read_fd(int fd) { read_fd_ = fd; } + int write_fd() const { return write_fd_; } + void set_write_fd(int fd) { write_fd_ = fd; } + + // Called in the parent process only. Reads the result code of the death + // test child process via a pipe, interprets it to set the outcome_ + // member, and closes read_fd_. Outputs diagnostics and terminates in + // case of unexpected codes. + void ReadAndInterpretStatusByte(); + + private: + // The textual content of the code this object is testing. This class + // doesn't own this string and should not attempt to delete it. + const char* const statement_; + // The regular expression which test output must match. DeathTestImpl + // doesn't own this object and should not attempt to delete it. + const RE* const regex_; + // True if the death test child process has been successfully spawned. + bool spawned_; + // The exit status of the child process. + int status_; + // How the death test concluded. + DeathTestOutcome outcome_; + // Descriptor to the read end of the pipe to the child process. It is + // always -1 in the child process. The child keeps its write end of the + // pipe in write_fd_. + int read_fd_; + // Descriptor to the child's write end of the pipe to the parent process. + // It is always -1 in the parent process. The parent keeps its end of the + // pipe in read_fd_. 
+ int write_fd_; +}; + +// Called in the parent process only. Reads the result code of the death +// test child process via a pipe, interprets it to set the outcome_ +// member, and closes read_fd_. Outputs diagnostics and terminates in +// case of unexpected codes. +void DeathTestImpl::ReadAndInterpretStatusByte() { + char flag; + int bytes_read; + + // The read() here blocks until data is available (signifying the + // failure of the death test) or until the pipe is closed (signifying + // its success), so it's okay to call this in the parent before + // the child process has exited. + do { + bytes_read = posix::Read(read_fd(), &flag, 1); + } while (bytes_read == -1 && errno == EINTR); + + if (bytes_read == 0) { + set_outcome(DIED); + } else if (bytes_read == 1) { + switch (flag) { + case kDeathTestReturned: + set_outcome(RETURNED); + break; + case kDeathTestThrew: + set_outcome(THREW); + break; + case kDeathTestLived: + set_outcome(LIVED); + break; + case kDeathTestInternalError: + FailFromInternalError(read_fd()); // Does not return. + break; + default: + GTEST_LOG_(FATAL) << "Death test child process reported " + << "unexpected status byte (" + << static_cast(flag) << ")"; + } + } else { + GTEST_LOG_(FATAL) << "Read from death test child process failed: " + << GetLastErrnoDescription(); + } + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd())); + set_read_fd(-1); +} + +// Signals that the death test code which should have exited, didn't. +// Should be called only in a death test child process. +// Writes a status byte to the child's status file descriptor, then +// calls _exit(1). +void DeathTestImpl::Abort(AbortReason reason) { + // The parent process considers the death test to be a failure if + // it finds any data in our pipe. So, here we write a single flag byte + // to the pipe, then exit. + const char status_ch = + reason == TEST_DID_NOT_DIE ? kDeathTestLived : + reason == TEST_THREW_EXCEPTION ? 
kDeathTestThrew : kDeathTestReturned; + + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1)); + // We are leaking the descriptor here because on some platforms (i.e., + // when built as Windows DLL), destructors of global objects will still + // run after calling _exit(). On such systems, write_fd_ will be + // indirectly closed from the destructor of UnitTestImpl, causing double + // close if it is also closed here. On debug configurations, double close + // may assert. As there are no in-process buffers to flush here, we are + // relying on the OS to close the descriptor after the process terminates + // when the destructors are not run. + _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash) +} + +// Returns an indented copy of stderr output for a death test. +// This makes distinguishing death test output lines from regular log lines +// much easier. +static ::std::string FormatDeathTestOutput(const ::std::string& output) { + ::std::string ret; + for (size_t at = 0; ; ) { + const size_t line_end = output.find('\n', at); + ret += "[ DEATH ] "; + if (line_end == ::std::string::npos) { + ret += output.substr(at); + break; + } + ret += output.substr(at, line_end + 1 - at); + at = line_end + 1; + } + return ret; +} + +// Assesses the success or failure of a death test, using both private +// members which have previously been set, and one argument: +// +// Private data members: +// outcome: An enumeration describing how the death test +// concluded: DIED, LIVED, THREW, or RETURNED. The death test +// fails in the latter three cases. +// status: The exit status of the child process. On *nix, it is in the +// in the format specified by wait(2). On Windows, this is the +// value supplied to the ExitProcess() API or a numeric code +// of the exception that terminated the program. +// regex: A regular expression object to be applied to +// the test's captured standard error output; the death test +// fails if it does not match. 
+// +// Argument: +// status_ok: true if exit_status is acceptable in the context of +// this particular death test, which fails if it is false +// +// Returns true iff all of the above conditions are met. Otherwise, the +// first failing condition, in the order given above, is the one that is +// reported. Also sets the last death test message string. +bool DeathTestImpl::Passed(bool status_ok) { + if (!spawned()) + return false; + + const std::string error_message = GetCapturedStderr(); + + bool success = false; + Message buffer; + + buffer << "Death test: " << statement() << "\n"; + switch (outcome()) { + case LIVED: + buffer << " Result: failed to die.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case THREW: + buffer << " Result: threw an exception.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case RETURNED: + buffer << " Result: illegal return in test statement.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case DIED: + if (status_ok) { + const bool matched = RE::PartialMatch(error_message.c_str(), *regex()); + if (matched) { + success = true; + } else { + buffer << " Result: died but not with expected error.\n" + << " Expected: " << regex()->pattern() << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + } else { + buffer << " Result: died but not with expected exit code:\n" + << " " << ExitSummary(status()) << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + break; + case IN_PROGRESS: + default: + GTEST_LOG_(FATAL) + << "DeathTest::Passed somehow called before conclusion of test"; + } + + DeathTest::set_last_death_test_message(buffer.GetString()); + return success; +} + +# if GTEST_OS_WINDOWS +// WindowsDeathTest implements death tests on Windows. 
Due to the +// specifics of starting new processes on Windows, death tests there are +// always threadsafe, and Google Test considers the +// --gtest_death_test_style=fast setting to be equivalent to +// --gtest_death_test_style=threadsafe there. +// +// A few implementation notes: Like the Linux version, the Windows +// implementation uses pipes for child-to-parent communication. But due to +// the specifics of pipes on Windows, some extra steps are required: +// +// 1. The parent creates a communication pipe and stores handles to both +// ends of it. +// 2. The parent starts the child and provides it with the information +// necessary to acquire the handle to the write end of the pipe. +// 3. The child acquires the write end of the pipe and signals the parent +// using a Windows event. +// 4. Now the parent can release the write end of the pipe on its side. If +// this is done before step 3, the object's reference count goes down to +// 0 and it is destroyed, preventing the child from acquiring it. The +// parent now has to release it, or read operations on the read end of +// the pipe will not return when the child terminates. +// 5. The parent reads child's output through the pipe (outcome code and +// any possible error messages) from the pipe, and its stderr and then +// determines whether to fail the test. +// +// Note: to distinguish Win32 API calls from the local method and function +// calls, the former are explicitly resolved in the global namespace. +// +class WindowsDeathTest : public DeathTestImpl { + public: + WindowsDeathTest(const char* a_statement, + const RE* a_regex, + const char* file, + int line) + : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {} + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + virtual TestRole AssumeRole(); + + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. 
+ const int line_; + // Handle to the write end of the pipe to the child process. + AutoHandle write_handle_; + // Child process handle. + AutoHandle child_handle_; + // Event the child process uses to signal the parent that it has + // acquired the handle to the write end of the pipe. After seeing this + // event the parent can release its own handles to make sure its + // ReadFile() calls return when the child terminates. + AutoHandle event_handle_; +}; + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int WindowsDeathTest::Wait() { + if (!spawned()) + return 0; + + // Wait until the child either signals that it has acquired the write end + // of the pipe or it dies. + const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() }; + switch (::WaitForMultipleObjects(2, + wait_handles, + FALSE, // Waits for any of the handles. + INFINITE)) { + case WAIT_OBJECT_0: + case WAIT_OBJECT_0 + 1: + break; + default: + GTEST_DEATH_TEST_CHECK_(false); // Should not get here. + } + + // The child has acquired the write end of the pipe or exited. + // We release the handle on our side and continue. + write_handle_.Reset(); + event_handle_.Reset(); + + ReadAndInterpretStatusByte(); + + // Waits for the child process to exit if it haven't already. This + // returns immediately if the child has already exited, regardless of + // whether previous calls to WaitForMultipleObjects synchronized on this + // handle or not. + GTEST_DEATH_TEST_CHECK_( + WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(), + INFINITE)); + DWORD status_code; + GTEST_DEATH_TEST_CHECK_( + ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE); + child_handle_.Reset(); + set_status(static_cast(status_code)); + return status(); +} + +// The AssumeRole process for a Windows death test. 
It creates a child +// process with the same executable as the current process to run the +// death test. The child process is given the --gtest_filter and +// --gtest_internal_run_death_test flags such that it knows to run the +// current death test only. +DeathTest::TestRole WindowsDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != NULL) { + // ParseInternalRunDeathTestFlag() has performed all the necessary + // processing. + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + // WindowsDeathTest uses an anonymous pipe to communicate results of + // a death test. + SECURITY_ATTRIBUTES handles_are_inheritable = { + sizeof(SECURITY_ATTRIBUTES), NULL, TRUE }; + HANDLE read_handle, write_handle; + GTEST_DEATH_TEST_CHECK_( + ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable, + 0) // Default buffer size. + != FALSE); + set_read_fd(::_open_osfhandle(reinterpret_cast(read_handle), + O_RDONLY)); + write_handle_.Reset(write_handle); + event_handle_.Reset(::CreateEvent( + &handles_are_inheritable, + TRUE, // The event will automatically reset to non-signaled state. + FALSE, // The initial state is non-signalled. + NULL)); // The even is unnamed. + GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL); + const std::string filter_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" + + info->test_case_name() + "." 
+ info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + + "=" + file_ + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index) + "|" + + StreamableToString(static_cast(::GetCurrentProcessId())) + + // size_t has the same width as pointers on both 32-bit and 64-bit + // Windows platforms. + // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx. + "|" + StreamableToString(reinterpret_cast(write_handle)) + + "|" + StreamableToString(reinterpret_cast(event_handle_.Get())); + + char executable_path[_MAX_PATH + 1]; // NOLINT + GTEST_DEATH_TEST_CHECK_( + _MAX_PATH + 1 != ::GetModuleFileNameA(NULL, + executable_path, + _MAX_PATH)); + + std::string command_line = + std::string(::GetCommandLineA()) + " " + filter_flag + " \"" + + internal_flag + "\""; + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // Flush the log buffers since the log streams are shared with the child. + FlushInfoLog(); + + // The child process will share the standard handles with the parent. + STARTUPINFOA startup_info; + memset(&startup_info, 0, sizeof(STARTUPINFO)); + startup_info.dwFlags = STARTF_USESTDHANDLES; + startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE); + startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE); + startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE); + + PROCESS_INFORMATION process_info; + GTEST_DEATH_TEST_CHECK_(::CreateProcessA( + executable_path, + const_cast(command_line.c_str()), + NULL, // Retuned process handle is not inheritable. + NULL, // Retuned thread handle is not inheritable. + TRUE, // Child inherits all inheritable handles (for write_handle_). + 0x0, // Default creation flags. + NULL, // Inherit the parent's environment. 
+ UnitTest::GetInstance()->original_working_dir(), + &startup_info, + &process_info) != FALSE); + child_handle_.Reset(process_info.hProcess); + ::CloseHandle(process_info.hThread); + set_spawned(true); + return OVERSEE_TEST; +} +# else // We are not on Windows. + +// ForkingDeathTest provides implementations for most of the abstract +// methods of the DeathTest interface. Only the AssumeRole method is +// left undefined. +class ForkingDeathTest : public DeathTestImpl { + public: + ForkingDeathTest(const char* statement, const RE* regex); + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + + protected: + void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; } + + private: + // PID of child process during death test; 0 in the child process itself. + pid_t child_pid_; +}; + +// Constructs a ForkingDeathTest. +ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex) + : DeathTestImpl(a_statement, a_regex), + child_pid_(-1) {} + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int ForkingDeathTest::Wait() { + if (!spawned()) + return 0; + + ReadAndInterpretStatusByte(); + + int status_value; + GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0)); + set_status(status_value); + return status_value; +} + +// A concrete death test class that forks, then immediately runs the test +// in the child process. +class NoExecDeathTest : public ForkingDeathTest { + public: + NoExecDeathTest(const char* a_statement, const RE* a_regex) : + ForkingDeathTest(a_statement, a_regex) { } + virtual TestRole AssumeRole(); +}; + +// The AssumeRole process for a fork-and-run death test. It implements a +// straightforward fork, with a simple pipe to transmit the status byte. 
+DeathTest::TestRole NoExecDeathTest::AssumeRole() { + const size_t thread_count = GetThreadCount(); + if (thread_count != 1) { + GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count); + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + + DeathTest::set_last_death_test_message(""); + CaptureStderr(); + // When we fork the process below, the log file buffers are copied, but the + // file descriptors are shared. We flush all log files here so that closing + // the file descriptors in the child process doesn't throw off the + // synchronization between descriptors and buffers in the parent process. + // This is as close to the fork as possible to avoid a race condition in case + // there are multiple threads running before the death test, and another + // thread writes to the log file. + FlushInfoLog(); + + const pid_t child_pid = fork(); + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + set_child_pid(child_pid); + if (child_pid == 0) { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0])); + set_write_fd(pipe_fd[1]); + // Redirects all logging to stderr in the child process to prevent + // concurrent writes to the log files. We capture stderr in the parent + // process and append the child process' output to a log. + LogToStderr(); + // Event forwarding to the listeners of event listener API mush be shut + // down in death test subprocesses. + GetUnitTestImpl()->listeners()->SuppressEventForwarding(); + g_in_fast_death_test_child = true; + return EXECUTE_TEST; + } else { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; + } +} + +// A concrete death test class that forks and re-executes the main +// program from the beginning, with command-line flags set that cause +// only this specific death test to be run. 
+class ExecDeathTest : public ForkingDeathTest { + public: + ExecDeathTest(const char* a_statement, const RE* a_regex, + const char* file, int line) : + ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { } + virtual TestRole AssumeRole(); + private: + static ::std::vector + GetArgvsForDeathTestChildProcess() { + ::std::vector args = GetInjectableArgvs(); + return args; + } + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; +}; + +// Utility class for accumulating command-line arguments. +class Arguments { + public: + Arguments() { + args_.push_back(NULL); + } + + ~Arguments() { + for (std::vector::iterator i = args_.begin(); i != args_.end(); + ++i) { + free(*i); + } + } + void AddArgument(const char* argument) { + args_.insert(args_.end() - 1, posix::StrDup(argument)); + } + + template + void AddArguments(const ::std::vector& arguments) { + for (typename ::std::vector::const_iterator i = arguments.begin(); + i != arguments.end(); + ++i) { + args_.insert(args_.end() - 1, posix::StrDup(i->c_str())); + } + } + char* const* Argv() { + return &args_[0]; + } + + private: + std::vector args_; +}; + +// A struct that encompasses the arguments to the child process of a +// threadsafe-style death test process. +struct ExecDeathTestArgs { + char* const* argv; // Command-line arguments for the child's call to exec + int close_fd; // File descriptor to close; the read end of a pipe +}; + +# if GTEST_OS_MAC +inline char** GetEnviron() { + // When Google Test is built as a framework on MacOS X, the environ variable + // is unavailable. Apple's documentation (man environ) recommends using + // _NSGetEnviron() instead. + return *_NSGetEnviron(); +} +# else +// Some POSIX platforms expect you to declare environ. extern "C" makes +// it reside in the global namespace. 
+extern "C" char** environ; +inline char** GetEnviron() { return environ; } +# endif // GTEST_OS_MAC + +# if !GTEST_OS_QNX +// The main function for a threadsafe-style death test child process. +// This function is called in a clone()-ed process and thus must avoid +// any potentially unsafe operations like malloc or libc functions. +static int ExecDeathTestChildMain(void* child_arg) { + ExecDeathTestArgs* const args = static_cast(child_arg); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd)); + + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; + } + + // We can safely call execve() as it's a direct system call. We + // cannot use execvp() as it's a libc function and thus potentially + // unsafe. Since execve() doesn't search the PATH, the user must + // invoke the test program via a valid path that contains at least + // one path separator. + execve(args->argv[0], args->argv, GetEnviron()); + DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " + + original_dir + " failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; +} +# endif // !GTEST_OS_QNX + +// Two utility routines that together determine the direction the stack +// grows. +// This could be accomplished more elegantly by a single recursive +// function, but we want to guard against the unlikely possibility of +// a smart compiler optimizing the recursion away. +// +// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining +// StackLowerThanAddress into StackGrowsDown, which then doesn't give +// correct answer. 
+void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_; +void StackLowerThanAddress(const void* ptr, bool* result) { + int dummy; + *result = (&dummy < ptr); +} + +bool StackGrowsDown() { + int dummy; + bool result; + StackLowerThanAddress(&dummy, &result); + return result; +} + +// Spawns a child process with the same executable as the current process in +// a thread-safe manner and instructs it to run the death test. The +// implementation uses fork(2) + exec. On systems where clone(2) is +// available, it is used instead, being slightly more thread-safe. On QNX, +// fork supports only single-threaded environments, so this function uses +// spawn(2) there instead. The function dies with an error message if +// anything goes wrong. +static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { + ExecDeathTestArgs args = { argv, close_fd }; + pid_t child_pid = -1; + +# if GTEST_OS_QNX + // Obtains the current directory and sets it to be closed in the child + // process. + const int cwd_fd = open(".", O_RDONLY); + GTEST_DEATH_TEST_CHECK_(cwd_fd != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC)); + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; + } + + int fd_flags; + // Set close_fd to be closed after spawn. + GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD)); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD, + fd_flags | FD_CLOEXEC)); + struct inheritance inherit = {0}; + // spawn is a system call. 
+ child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron()); + // Restores the current working directory. + GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd)); + +# else // GTEST_OS_QNX +# if GTEST_OS_LINUX + // When a SIGPROF signal is received while fork() or clone() are executing, + // the process may hang. To avoid this, we ignore SIGPROF here and re-enable + // it after the call to fork()/clone() is complete. + struct sigaction saved_sigprof_action; + struct sigaction ignore_sigprof_action; + memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action)); + sigemptyset(&ignore_sigprof_action.sa_mask); + ignore_sigprof_action.sa_handler = SIG_IGN; + GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction( + SIGPROF, &ignore_sigprof_action, &saved_sigprof_action)); +# endif // GTEST_OS_LINUX + +# if GTEST_HAS_CLONE + const bool use_fork = GTEST_FLAG(death_test_use_fork); + + if (!use_fork) { + static const bool stack_grows_down = StackGrowsDown(); + const size_t stack_size = getpagesize(); + // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead. + void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED); + + // Maximum stack alignment in bytes: For a downward-growing stack, this + // amount is subtracted from size of the stack space to get an address + // that is within the stack space and is aligned on all systems we care + // about. As far as I know there is no ABI with stack alignment greater + // than 64. We assume stack and stack_size already have alignment of + // kMaxStackAlignment. + const size_t kMaxStackAlignment = 64; + void* const stack_top = + static_cast(stack) + + (stack_grows_down ? 
stack_size - kMaxStackAlignment : 0); + GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment && + reinterpret_cast(stack_top) % kMaxStackAlignment == 0); + + child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args); + + GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1); + } +# else + const bool use_fork = true; +# endif // GTEST_HAS_CLONE + + if (use_fork && (child_pid = fork()) == 0) { + ExecDeathTestChildMain(&args); + _exit(0); + } +# endif // GTEST_OS_QNX +# if GTEST_OS_LINUX + GTEST_DEATH_TEST_CHECK_SYSCALL_( + sigaction(SIGPROF, &saved_sigprof_action, NULL)); +# endif // GTEST_OS_LINUX + + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + return child_pid; +} + +// The AssumeRole process for a fork-and-exec death test. It re-executes the +// main program from the beginning, setting the --gtest_filter +// and --gtest_internal_run_death_test flags to cause only the current +// death test to be re-run. +DeathTest::TestRole ExecDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != NULL) { + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + // Clear the close-on-exec flag on the write end of the pipe, lest + // it be closed when the child process does an exec: + GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1); + + const std::string filter_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" + + info->test_case_name() + "." 
+ info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "=" + + file_ + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index) + "|" + + StreamableToString(pipe_fd[1]); + Arguments args; + args.AddArguments(GetArgvsForDeathTestChildProcess()); + args.AddArgument(filter_flag.c_str()); + args.AddArgument(internal_flag.c_str()); + + DeathTest::set_last_death_test_message(""); + + CaptureStderr(); + // See the comment in NoExecDeathTest::AssumeRole for why the next line + // is necessary. + FlushInfoLog(); + + const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_child_pid(child_pid); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; +} + +# endif // !GTEST_OS_WINDOWS + +// Creates a concrete DeathTest-derived class that depends on the +// --gtest_death_test_style flag, and sets the pointer pointed to +// by the "test" argument to its address. If the test should be +// skipped, sets that pointer to NULL. Returns true, unless the +// flag is set to an invalid value. 
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex, + const char* file, int line, + DeathTest** test) { + UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const int death_test_index = impl->current_test_info() + ->increment_death_test_count(); + + if (flag != NULL) { + if (death_test_index > flag->index()) { + DeathTest::set_last_death_test_message( + "Death test count (" + StreamableToString(death_test_index) + + ") somehow exceeded expected maximum (" + + StreamableToString(flag->index()) + ")"); + return false; + } + + if (!(flag->file() == file && flag->line() == line && + flag->index() == death_test_index)) { + *test = NULL; + return true; + } + } + +# if GTEST_OS_WINDOWS + + if (GTEST_FLAG(death_test_style) == "threadsafe" || + GTEST_FLAG(death_test_style) == "fast") { + *test = new WindowsDeathTest(statement, regex, file, line); + } + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") { + *test = new ExecDeathTest(statement, regex, file, line); + } else if (GTEST_FLAG(death_test_style) == "fast") { + *test = new NoExecDeathTest(statement, regex); + } + +# endif // GTEST_OS_WINDOWS + + else { // NOLINT - this is more readable than unbalanced brackets inside #if. + DeathTest::set_last_death_test_message( + "Unknown death test style \"" + GTEST_FLAG(death_test_style) + + "\" encountered"); + return false; + } + + return true; +} + +// Splits a given string on a given delimiter, populating a given +// vector with the fields. GTEST_HAS_DEATH_TEST implies that we have +// ::std::string, so we can use it here. 
+static void SplitString(const ::std::string& str, char delimiter, + ::std::vector< ::std::string>* dest) { + ::std::vector< ::std::string> parsed; + ::std::string::size_type pos = 0; + while (::testing::internal::AlwaysTrue()) { + const ::std::string::size_type colon = str.find(delimiter, pos); + if (colon == ::std::string::npos) { + parsed.push_back(str.substr(pos)); + break; + } else { + parsed.push_back(str.substr(pos, colon - pos)); + pos = colon + 1; + } + } + dest->swap(parsed); +} + +# if GTEST_OS_WINDOWS +// Recreates the pipe and event handles from the provided parameters, +// signals the event, and returns a file descriptor wrapped around the pipe +// handle. This function is called in the child process only. +int GetStatusFileDescriptor(unsigned int parent_process_id, + size_t write_handle_as_size_t, + size_t event_handle_as_size_t) { + AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE, + FALSE, // Non-inheritable. + parent_process_id)); + if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) { + DeathTestAbort("Unable to open parent process " + + StreamableToString(parent_process_id)); + } + + // TODO(vladl@google.com): Replace the following check with a + // compile-time assertion when available. + GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t)); + + const HANDLE write_handle = + reinterpret_cast(write_handle_as_size_t); + HANDLE dup_write_handle; + + // The newly initialized handle is accessible only in in the parent + // process. To obtain one accessible within the child, we need to use + // DuplicateHandle. + if (!::DuplicateHandle(parent_process_handle.Get(), write_handle, + ::GetCurrentProcess(), &dup_write_handle, + 0x0, // Requested privileges ignored since + // DUPLICATE_SAME_ACCESS is used. + FALSE, // Request non-inheritable handler. 
+ DUPLICATE_SAME_ACCESS)) { + DeathTestAbort("Unable to duplicate the pipe handle " + + StreamableToString(write_handle_as_size_t) + + " from the parent process " + + StreamableToString(parent_process_id)); + } + + const HANDLE event_handle = reinterpret_cast(event_handle_as_size_t); + HANDLE dup_event_handle; + + if (!::DuplicateHandle(parent_process_handle.Get(), event_handle, + ::GetCurrentProcess(), &dup_event_handle, + 0x0, + FALSE, + DUPLICATE_SAME_ACCESS)) { + DeathTestAbort("Unable to duplicate the event handle " + + StreamableToString(event_handle_as_size_t) + + " from the parent process " + + StreamableToString(parent_process_id)); + } + + const int write_fd = + ::_open_osfhandle(reinterpret_cast(dup_write_handle), O_APPEND); + if (write_fd == -1) { + DeathTestAbort("Unable to convert pipe handle " + + StreamableToString(write_handle_as_size_t) + + " to a file descriptor"); + } + + // Signals the parent that the write end of the pipe has been acquired + // so the parent can release its own write end. + ::SetEvent(dup_event_handle); + + return write_fd; +} +# endif // GTEST_OS_WINDOWS + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { + if (GTEST_FLAG(internal_run_death_test) == "") return NULL; + + // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we + // can use it here. 
+ int line = -1; + int index = -1; + ::std::vector< ::std::string> fields; + SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields); + int write_fd = -1; + +# if GTEST_OS_WINDOWS + + unsigned int parent_process_id = 0; + size_t write_handle_as_size_t = 0; + size_t event_handle_as_size_t = 0; + + if (fields.size() != 6 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &parent_process_id) + || !ParseNaturalNumber(fields[4], &write_handle_as_size_t) + || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); + } + write_fd = GetStatusFileDescriptor(parent_process_id, + write_handle_as_size_t, + event_handle_as_size_t); +# else + + if (fields.size() != 4 + || !ParseNaturalNumber(fields[1], &line) + || !ParseNaturalNumber(fields[2], &index) + || !ParseNaturalNumber(fields[3], &write_fd)) { + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); + } + +# endif // GTEST_OS_WINDOWS + + return new InternalRunDeathTestFlag(fields[0], line, index, write_fd); +} + +} // namespace internal + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: keith.ray@gmail.com (Keith Ray) + + +#include + +#if GTEST_OS_WINDOWS_MOBILE +# include +#elif GTEST_OS_WINDOWS +# include +# include +#elif GTEST_OS_SYMBIAN +// Symbian OpenC has PATH_MAX in sys/syslimits.h +# include +#else +# include +# include // Some Linux distributions define PATH_MAX here. +#endif // GTEST_OS_WINDOWS_MOBILE + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_MAX_ _MAX_PATH +#elif defined(PATH_MAX) +# define GTEST_PATH_MAX_ PATH_MAX +#elif defined(_XOPEN_PATH_MAX) +# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX +#else +# define GTEST_PATH_MAX_ _POSIX_PATH_MAX +#endif // GTEST_OS_WINDOWS + + +namespace testing { +namespace internal { + +#if GTEST_OS_WINDOWS +// On Windows, '\\' is the standard path separator, but many tools and the +// Windows API also accept '/' as an alternate path separator. Unless otherwise +// noted, a file path can contain either kind of path separators, or a mixture +// of them. 
+const char kPathSeparator = '\\'; +const char kAlternatePathSeparator = '/'; +//const char kPathSeparatorString[] = "\\"; +const char kAlternatePathSeparatorString[] = "/"; +# if GTEST_OS_WINDOWS_MOBILE +// Windows CE doesn't have a current directory. You should not use +// the current directory in tests on Windows CE, but this at least +// provides a reasonable fallback. +const char kCurrentDirectoryString[] = "\\"; +// Windows CE doesn't define INVALID_FILE_ATTRIBUTES +const DWORD kInvalidFileAttributes = 0xffffffff; +# else +const char kCurrentDirectoryString[] = ".\\"; +# endif // GTEST_OS_WINDOWS_MOBILE +#else +const char kPathSeparator = '/'; +//const char kPathSeparatorString[] = "/"; +const char kCurrentDirectoryString[] = "./"; +#endif // GTEST_OS_WINDOWS + +// Returns whether the given character is a valid path separator. +static bool IsPathSeparator(char c) { +#if GTEST_HAS_ALT_PATH_SEP_ + return (c == kPathSeparator) || (c == kAlternatePathSeparator); +#else + return c == kPathSeparator; +#endif +} + +// Returns the current working directory, or "" if unsuccessful. +FilePath FilePath::GetCurrentDir() { +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE doesn't have a current directory, so we just return + // something reasonable. + return FilePath(kCurrentDirectoryString); +#elif GTEST_OS_WINDOWS + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd); +#else + char cwd[GTEST_PATH_MAX_ + 1] = { '\0' }; + return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd); +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns a copy of the FilePath with the case-insensitive extension removed. +// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns +// FilePath("dir/file"). If a case-insensitive extension is not +// found, returns a copy of the original FilePath. 
+FilePath FilePath::RemoveExtension(const char* extension) const { + const std::string dot_extension = std::string(".") + extension; + if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) { + return FilePath(pathname_.substr( + 0, pathname_.length() - dot_extension.length())); + } + return *this; +} + +// Returns a pointer to the last occurence of a valid path separator in +// the FilePath. On Windows, for example, both '/' and '\' are valid path +// separators. Returns NULL if no path separator was found. +const char* FilePath::FindLastPathSeparator() const { + const char* const last_sep = strrchr(c_str(), kPathSeparator); +#if GTEST_HAS_ALT_PATH_SEP_ + const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator); + // Comparing two pointers of which only one is NULL is undefined. + if (last_alt_sep != NULL && + (last_sep == NULL || last_alt_sep > last_sep)) { + return last_alt_sep; + } +#endif + return last_sep; +} + +// Returns a copy of the FilePath with the directory part removed. +// Example: FilePath("path/to/file").RemoveDirectoryName() returns +// FilePath("file"). If there is no directory part ("just_a_file"), it returns +// the FilePath unmodified. If there is no file part ("just_a_dir/") it +// returns an empty FilePath (""). +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveDirectoryName() const { + const char* const last_sep = FindLastPathSeparator(); + return last_sep ? FilePath(last_sep + 1) : *this; +} + +// RemoveFileName returns the directory path with the filename removed. +// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". +// If the FilePath is "a_file" or "/a_file", RemoveFileName returns +// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does +// not have a file, like "just/a/dir/", it returns the FilePath unmodified. +// On Windows platform, '\' is the path separator, otherwise it is '/'. 
+FilePath FilePath::RemoveFileName() const { + const char* const last_sep = FindLastPathSeparator(); + std::string dir; + if (last_sep) { + dir = std::string(c_str(), last_sep + 1 - c_str()); + } else { + dir = kCurrentDirectoryString; + } + return FilePath(dir); +} + +// Helper functions for naming files in a directory for xml output. + +// Given directory = "dir", base_name = "test", number = 0, +// extension = "xml", returns "dir/test.xml". If number is greater +// than zero (e.g., 12), returns "dir/test_12.xml". +// On Windows platform, uses \ as the separator rather than /. +FilePath FilePath::MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension) { + std::string file; + if (number == 0) { + file = base_name.string() + "." + extension; + } else { + file = base_name.string() + "_" + StreamableToString(number) + + "." + extension; + } + return ConcatPaths(directory, FilePath(file)); +} + +// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml". +// On Windows, uses \ as the separator rather than /. +FilePath FilePath::ConcatPaths(const FilePath& directory, + const FilePath& relative_path) { + if (directory.IsEmpty()) + return relative_path; + const FilePath dir(directory.RemoveTrailingPathSeparator()); + return FilePath(dir.string() + kPathSeparator + relative_path.string()); +} + +// Returns true if pathname describes something findable in the file-system, +// either a file, directory, or whatever. +bool FilePath::FileOrDirectoryExists() const { +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + return attributes != kInvalidFileAttributes; +#else + posix::StatStruct file_stat; + return posix::Stat(pathname_.c_str(), &file_stat) == 0; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns true if pathname describes a directory in the file-system +// that exists. 
+bool FilePath::DirectoryExists() const { + bool result = false; +#if GTEST_OS_WINDOWS + // Don't strip off trailing separator if path is a root directory on + // Windows (like "C:\\"). + const FilePath& path(IsRootDirectory() ? *this : + RemoveTrailingPathSeparator()); +#else + const FilePath& path(*this); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + if ((attributes != kInvalidFileAttributes) && + (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + result = true; + } +#else + posix::StatStruct file_stat; + result = posix::Stat(path.c_str(), &file_stat) == 0 && + posix::IsDir(file_stat); +#endif // GTEST_OS_WINDOWS_MOBILE + + return result; +} + +// Returns true if pathname describes a root directory. (Windows has one +// root directory per disk drive.) +bool FilePath::IsRootDirectory() const { +#if GTEST_OS_WINDOWS + // TODO(wan@google.com): on Windows a network share like + // \\server\share can be a root directory, although it cannot be the + // current directory. Handle this properly. + return pathname_.length() == 3 && IsAbsolutePath(); +#else + return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]); +#endif +} + +// Returns true if pathname describes an absolute path. +bool FilePath::IsAbsolutePath() const { + const char* const name = pathname_.c_str(); +#if GTEST_OS_WINDOWS + return pathname_.length() >= 3 && + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && + IsPathSeparator(name[2]); +#else + return IsPathSeparator(name[0]); +#endif +} + +// Returns a pathname for a file that does not currently exist. The pathname +// will be directory/base_name.extension or +// directory/base_name_.extension if directory/base_name.extension +// already exists. The number will be incremented until a pathname is found +// that does not already exist. 
+// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. +// There could be a race condition if two or more processes are calling this +// function at the same time -- they could both pick the same filename. +FilePath FilePath::GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension) { + FilePath full_pathname; + int number = 0; + do { + full_pathname.Set(MakeFileName(directory, base_name, number++, extension)); + } while (full_pathname.FileOrDirectoryExists()); + return full_pathname; +} + +// Returns true if FilePath ends with a path separator, which indicates that +// it is intended to represent a directory. Returns false otherwise. +// This does NOT check that a directory (or file) actually exists. +bool FilePath::IsDirectory() const { + return !pathname_.empty() && + IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]); +} + +// Create directories so that path exists. Returns true if successful or if +// the directories already exist; returns false if unable to create directories +// for any reason. +bool FilePath::CreateDirectoriesRecursively() const { + if (!this->IsDirectory()) { + return false; + } + + if (pathname_.length() == 0 || this->DirectoryExists()) { + return true; + } + + const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName()); + return parent.CreateDirectoriesRecursively() && this->CreateFolder(); +} + +// Create the directory so that path exists. Returns true if successful or +// if the directory already exists; returns false if unable to create the +// directory for any reason, including if the parent directory does not +// exist. Not named "CreateDirectory" because that's a macro on Windows. +bool FilePath::CreateFolder() const { +#if GTEST_OS_WINDOWS_MOBILE + FilePath removed_sep(this->RemoveTrailingPathSeparator()); + LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); + int result = CreateDirectory(unicode, NULL) ? 
0 : -1; + delete [] unicode; +#elif GTEST_OS_WINDOWS + int result = _mkdir(pathname_.c_str()); +#else + int result = mkdir(pathname_.c_str(), 0777); +#endif // GTEST_OS_WINDOWS_MOBILE + + if (result == -1) { + return this->DirectoryExists(); // An error is OK if the directory exists. + } + return true; // No error. +} + +// If input name has a trailing separator character, remove it and return the +// name, otherwise return the name string unmodified. +// On Windows platform, uses \ as the separator, other platforms use /. +FilePath FilePath::RemoveTrailingPathSeparator() const { + return IsDirectory() + ? FilePath(pathname_.substr(0, pathname_.length() - 1)) + : *this; +} + +// Removes any redundant separators that might be in the pathname. +// For example, "bar///foo" becomes "bar/foo". Does not eliminate other +// redundancies that might be in a pathname involving "." or "..". +// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share). +void FilePath::Normalize() { + if (pathname_.c_str() == NULL) { + pathname_ = ""; + return; + } + const char* src = pathname_.c_str(); + char* const dest = new char[pathname_.length() + 1]; + char* dest_ptr = dest; + memset(dest_ptr, 0, pathname_.length() + 1); + + while (*src != '\0') { + *dest_ptr = *src; + if (!IsPathSeparator(*src)) { + src++; + } else { +#if GTEST_HAS_ALT_PATH_SEP_ + if (*dest_ptr == kAlternatePathSeparator) { + *dest_ptr = kPathSeparator; + } +#endif + while (IsPathSeparator(*src)) + src++; + } + dest_ptr++; + } + *dest_ptr = '\0'; + pathname_ = dest; + delete[] dest; +} + +} // namespace internal +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + + +#include +#include +#include +#include + +#if GTEST_OS_WINDOWS_MOBILE +# include // For TerminateProcess() +#elif GTEST_OS_WINDOWS +# include +# include +#else +# include +#endif // GTEST_OS_WINDOWS_MOBILE + +#if GTEST_OS_MAC +# include +# include +# include +#endif // GTEST_OS_MAC + +#if GTEST_OS_QNX +# include +# include +#endif // GTEST_OS_QNX + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. 
+#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { +namespace internal { + +#if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC and C++Builder do not provide a definition of STDERR_FILENO. +const int kStdOutFileno = 1; +const int kStdErrFileno = 2; +#else +const int kStdOutFileno = STDOUT_FILENO; +const int kStdErrFileno = STDERR_FILENO; +#endif // _MSC_VER + +#if GTEST_OS_MAC + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +size_t GetThreadCount() { + const task_t task = mach_task_self(); + mach_msg_type_number_t thread_count; + thread_act_array_t thread_list; + const kern_return_t status = task_threads(task, &thread_list, &thread_count); + if (status == KERN_SUCCESS) { + // task_threads allocates resources in thread_list and we need to free them + // to avoid leaks. + vm_deallocate(task, + reinterpret_cast(thread_list), + sizeof(thread_t) * thread_count); + return static_cast(thread_count); + } else { + return 0; + } +} + +#elif GTEST_OS_QNX + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +size_t GetThreadCount() { + const int fd = open("/proc/self/as", O_RDONLY); + if (fd < 0) { + return 0; + } + procfs_info process_info; + const int status = + devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL); + close(fd); + if (status == EOK) { + return static_cast(process_info.num_threads); + } else { + return 0; + } +} + +#else + +size_t GetThreadCount() { + // There's no portable way to detect the number of threads, so we just + // return 0 to indicate that we cannot detect it. + return 0; +} + +#endif // GTEST_OS_MAC + +#if GTEST_USES_POSIX_RE + +// Implements RE. Currently only needed for death tests. + +RE::~RE() { + if (is_valid_) { + // regfree'ing an invalid regex might crash because the content + // of the regex is undefined. 
Since the regex's are essentially + // the same, one cannot be valid (or invalid) without the other + // being so too. + regfree(&partial_regex_); + regfree(&full_regex_); + } + free(const_cast(pattern_)); +} + +// Returns true iff regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + if (!re.is_valid_) return false; + + regmatch_t match; + return regexec(&re.full_regex_, str, 1, &match, 0) == 0; +} + +// Returns true iff regular expression re matches a substring of str +// (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + if (!re.is_valid_) return false; + + regmatch_t match; + return regexec(&re.partial_regex_, str, 1, &match, 0) == 0; +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = posix::StrDup(regex); + + // Reserves enough bytes to hold the regular expression used for a + // full match. + const size_t full_regex_len = strlen(regex) + 10; + char* const full_pattern = new char[full_regex_len]; + + snprintf(full_pattern, full_regex_len, "^(%s)$", regex); + is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0; + // We want to call regcomp(&partial_regex_, ...) even if the + // previous expression returns false. Otherwise partial_regex_ may + // not be properly initialized can may cause trouble when it's + // freed. + // + // Some implementation of POSIX regex (e.g. on at least some + // versions of Cygwin) doesn't accept the empty string as a valid + // regex. We change it to an equivalent form "()" to be safe. + if (is_valid_) { + const char* const partial_regex = (*regex == '\0') ? 
"()" : regex; + is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0; + } + EXPECT_TRUE(is_valid_) + << "Regular expression \"" << regex + << "\" is not a valid POSIX Extended regular expression."; + + delete[] full_pattern; +} + +#elif GTEST_USES_SIMPLE_RE + +// Returns true iff ch appears anywhere in str (excluding the +// terminating '\0' character). +bool IsInSet(char ch, const char* str) { + return ch != '\0' && strchr(str, ch) != NULL; +} + +// Returns true iff ch belongs to the given classification. Unlike +// similar functions in , these aren't affected by the +// current locale. +bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; } +bool IsAsciiPunct(char ch) { + return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~"); +} +bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); } +bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); } +bool IsAsciiWordChar(char ch) { + return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') || + ('0' <= ch && ch <= '9') || ch == '_'; +} + +// Returns true iff "\\c" is a supported escape sequence. +bool IsValidEscape(char c) { + return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW")); +} + +// Returns true iff the given atom (specified by escaped and pattern) +// matches ch. The result is undefined if the atom is invalid. +bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { + if (escaped) { // "\\p" where p is pattern_char. + switch (pattern_char) { + case 'd': return IsAsciiDigit(ch); + case 'D': return !IsAsciiDigit(ch); + case 'f': return ch == '\f'; + case 'n': return ch == '\n'; + case 'r': return ch == '\r'; + case 's': return IsAsciiWhiteSpace(ch); + case 'S': return !IsAsciiWhiteSpace(ch); + case 't': return ch == '\t'; + case 'v': return ch == '\v'; + case 'w': return IsAsciiWordChar(ch); + case 'W': return !IsAsciiWordChar(ch); + } + return IsAsciiPunct(pattern_char) && pattern_char == ch; + } + + return (pattern_char == '.' 
&& ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+std::string FormatRegexSyntaxError(const char* regex, int index) {
+  return (Message() << "Syntax error at index " << index
+          << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+  if (regex == NULL) {
+    // TODO(wan@google.com): fix the source file location in the
+    // assertion failures to match where the regex is used in user
+    // code.
+    ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+    return false;
+  }
+
+  bool is_valid = true;
+
+  // True iff ?, *, or + can follow the previous atom.
+  bool prev_repeatable = false;
+  for (int i = 0; regex[i]; i++) {
+    if (regex[i] == '\\') {  // An escape sequence
+      i++;
+      if (regex[i] == '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "'\\' cannot appear at the end.";
+        return false;
+      }
+
+      if (!IsValidEscape(regex[i])) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "invalid escape sequence \"\\" << regex[i] << "\".";
+        is_valid = false;
+      }
+      prev_repeatable = true;
+    } else {  // Not an escape sequence.
+      const char ch = regex[i];
+
+      if (ch == '^' && i > 0) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'^' can only appear at the beginning.";
+        is_valid = false;
+      } else if (ch == '$' && regex[i + 1] != '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'$' can only appear at the end.";
+        is_valid = false;
+      } else if (IsInSet(ch, "()[]{}|")) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' is unsupported.";
+        is_valid = false;
+      } else if (IsRepeat(ch) && !prev_repeatable) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' can only follow a repeatable token.";
+        is_valid = false;
+      }
+
+      prev_repeatable = !IsInSet(ch, "^$?*+");
+    }
+  }
+
+  return is_valid;
+}
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression.  The regex atom is defined as c if escaped is false,
+// or \c otherwise.  repeat is the repetition meta character (?, *,
+// or +).  The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway.  We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char c, char repeat, const char* regex,
+    const char* str) {
+  const size_t min_count = (repeat == '+') ? 1 : 0;
+  // NOTE(review): the cast's template argument was stripped in transit;
+  // upstream uses static_cast<size_t>(-1) as the largest count.
+  const size_t max_count = (repeat == '?') ? 1 :
+      static_cast<size_t>(-1) - 1;
+  // We cannot call numeric_limits<size_t>::max() as it conflicts with the
+  // max() macro on Windows.
+
+  for (size_t i = 0; i <= max_count; ++i) {
+    // We know that the atom matches each of the first i characters in str.
+    if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+      // We have enough matches at the head, and the tail matches too.
+      // Since we only care about *whether* the pattern matches str
+      // (as opposed to *how* it matches), there is no need to find a
+      // greedy match.
+ return true; + } + if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i])) + return false; + } + return false; +} + +// Returns true iff regex matches a prefix of str. regex must be a +// valid simple regular expression and not start with "^", or the +// result is undefined. +bool MatchRegexAtHead(const char* regex, const char* str) { + if (*regex == '\0') // An empty regex matches a prefix of anything. + return true; + + // "$" only matches the end of a string. Note that regex being + // valid guarantees that there's nothing after "$" in it. + if (*regex == '$') + return *str == '\0'; + + // Is the first thing in regex an escape sequence? + const bool escaped = *regex == '\\'; + if (escaped) + ++regex; + if (IsRepeat(regex[1])) { + // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so + // here's an indirect recursion. It terminates as the regex gets + // shorter in each recursion. + return MatchRepetitionAndRegexAtHead( + escaped, regex[0], regex[1], regex + 2, str); + } else { + // regex isn't empty, isn't "$", and doesn't start with a + // repetition. We match the first atom of regex with the first + // character of str and recurse. + return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) && + MatchRegexAtHead(regex + 1, str + 1); + } +} + +// Returns true iff regex matches any substring of str. regex must be +// a valid simple regular expression, or the result is undefined. +// +// The algorithm is recursive, but the recursion depth doesn't exceed +// the regex length, so we won't need to worry about running out of +// stack space normally. In rare cases the time complexity can be +// exponential with respect to the regex length + the string length, +// but usually it's must faster (often close to linear). +bool MatchRegexAnywhere(const char* regex, const char* str) { + if (regex == NULL || str == NULL) + return false; + + if (*regex == '^') + return MatchRegexAtHead(regex + 1, str); + + // A successful match can be anywhere in str. 
+ do { + if (MatchRegexAtHead(regex, str)) + return true; + } while (*str++ != '\0'); + return false; +} + +// Implements the RE class. + +RE::~RE() { + free(const_cast(pattern_)); + free(const_cast(full_pattern_)); +} + +// Returns true iff regular expression re matches the entire str. +bool RE::FullMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str); +} + +// Returns true iff regular expression re matches a substring of str +// (including str itself). +bool RE::PartialMatch(const char* str, const RE& re) { + return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str); +} + +// Initializes an RE from its string representation. +void RE::Init(const char* regex) { + pattern_ = full_pattern_ = NULL; + if (regex != NULL) { + pattern_ = posix::StrDup(regex); + } + + is_valid_ = ValidateRegex(regex); + if (!is_valid_) { + // No need to calculate the full pattern when the regex is invalid. + return; + } + + const size_t len = strlen(regex); + // Reserves enough bytes to hold the regular expression used for a + // full match: we need space to prepend a '^', append a '$', and + // terminate the string with '\0'. + char* buffer = static_cast(malloc(len + 3)); + full_pattern_ = buffer; + + if (*regex != '^') + *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'. + + // We don't use snprintf or strncpy, as they trigger a warning when + // compiled with VC++ 8.0. + memcpy(buffer, regex, len); + buffer += len; + + if (len == 0 || regex[len - 1] != '$') + *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'. + + *buffer = '\0'; +} + +#endif // GTEST_USES_POSIX_RE + +const char kUnknownFile[] = "unknown file"; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { + const std::string file_name(file == NULL ? 
kUnknownFile : file);
+
+  if (line < 0) {
+    return file_name + ":";
+  }
+#ifdef _MSC_VER
+  return file_name + "(" + StreamableToString(line) + "):";
+#else
+  return file_name + ":" + StreamableToString(line) + ":";
+#endif  // _MSC_VER
+}
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+// Note that FormatCompilerIndependentFileLocation() does NOT append colon
+// to the file location it produces, unlike FormatFileLocation().
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
+    const char* file, int line) {
+  const std::string file_name(file == NULL ? kUnknownFile : file);
+
+  if (line < 0)
+    return file_name;
+  else
+    return file_name + ":" + StreamableToString(line);
+}
+
+
+// Writes the log prefix ("[ INFO ]" etc. plus file:line) on construction;
+// the caller then streams the message body into GetStream().
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+    : severity_(severity) {
+  const char* const marker =
+      severity == GTEST_INFO ?    "[ INFO ]" :
+      severity == GTEST_WARNING ? "[WARNING]" :
+      severity == GTEST_ERROR ?   "[ ERROR ]" : "[ FATAL ]";
+  GetStream() << ::std::endl << marker << " "
+              << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+  GetStream() << ::std::endl;
+  if (severity_ == GTEST_FATAL) {
+    fflush(stderr);
+    posix::Abort();
+  }
+}
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable: 4996)
+#endif  // _MSC_VER
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+  // The ctor redirects the stream to a temporary file.
+  // It saves the original descriptor via dup() so it can be restored later.
+  explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+# if GTEST_OS_WINDOWS
+    char temp_dir_path[MAX_PATH + 1] = { '\0' };  // NOLINT
+    char temp_file_path[MAX_PATH + 1] = { '\0' };  // NOLINT
+
+    ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+    const UINT success = ::GetTempFileNameA(temp_dir_path,
+                                            "gtest_redir",
+                                            0,  // Generate unique file name.
+                                            temp_file_path);
+    GTEST_CHECK_(success != 0)
+        << "Unable to create a temporary file in " << temp_dir_path;
+    const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+    GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+                                    << temp_file_path;
+    filename_ = temp_file_path;
+# else
+    // There's no guarantee that a test has write access to the current
+    // directory, so we create the temporary file in the /tmp directory
+    // instead. We use /tmp on most systems, and /sdcard on Android.
+    // That's because Android doesn't have /tmp.
+#  if GTEST_OS_LINUX_ANDROID
+    // Note: Android applications are expected to call the framework's
+    // Context.getExternalStorageDirectory() method through JNI to get
+    // the location of the world-writable SD Card directory. However,
+    // this requires a Context handle, which cannot be retrieved
+    // globally from native code. Doing so also precludes running the
+    // code as part of a regular standalone executable, which doesn't
+    // run in a Dalvik process (e.g. when running it through 'adb shell').
+    //
+    // The location /sdcard is directly accessible from native code
+    // and is the only location (unofficially) supported by the Android
+    // team. It's generally a symlink to the real SD Card mount point
+    // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or
+    // other OEM-customized locations. Never rely on these, and always
+    // use /sdcard.
+ char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX"; +# else + char name_template[] = "/tmp/captured_stream.XXXXXX"; +# endif // GTEST_OS_LINUX_ANDROID + const int captured_fd = mkstemp(name_template); + filename_ = name_template; +# endif // GTEST_OS_WINDOWS + fflush(NULL); + dup2(captured_fd, fd_); + close(captured_fd); + } + + ~CapturedStream() { + remove(filename_.c_str()); + } + + std::string GetCapturedString() { + if (uncaptured_fd_ != -1) { + // Restores the original stream. + fflush(NULL); + dup2(uncaptured_fd_, fd_); + close(uncaptured_fd_); + uncaptured_fd_ = -1; + } + + FILE* const file = posix::FOpen(filename_.c_str(), "r"); + const std::string content = ReadEntireFile(file); + posix::FClose(file); + return content; + } + + private: + // Reads the entire content of a file as an std::string. + static std::string ReadEntireFile(FILE* file); + + // Returns the size (in bytes) of a file. + static size_t GetFileSize(FILE* file); + + const int fd_; // A stream to capture. + int uncaptured_fd_; + // Name of the temporary file holding the stderr output. + ::std::string filename_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); +}; + +// Returns the size (in bytes) of a file. +size_t CapturedStream::GetFileSize(FILE* file) { + fseek(file, 0, SEEK_END); + return static_cast(ftell(file)); +} + +// Reads the entire content of a file as a string. +std::string CapturedStream::ReadEntireFile(FILE* file) { + const size_t file_size = GetFileSize(file); + char* const buffer = new char[file_size]; + + size_t bytes_last_read = 0; // # of bytes read in the last fread() + size_t bytes_read = 0; // # of bytes read so far + + fseek(file, 0, SEEK_SET); + + // Keeps reading the file until we cannot read further or the + // pre-determined file size is reached. 
+ do { + bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_read += bytes_last_read; + } while (bytes_last_read > 0 && bytes_read < file_size); + + const std::string content(buffer, bytes_read); + delete[] buffer; + + return content; +} + +# ifdef _MSC_VER +# pragma warning(pop) +# endif // _MSC_VER + +static CapturedStream* g_captured_stderr = NULL; +static CapturedStream* g_captured_stdout = NULL; + +// Starts capturing an output stream (stdout/stderr). +void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) { + if (*stream != NULL) { + GTEST_LOG_(FATAL) << "Only one " << stream_name + << " capturer can exist at a time."; + } + *stream = new CapturedStream(fd); +} + +// Stops capturing the output stream and returns the captured string. +std::string GetCapturedStream(CapturedStream** captured_stream) { + const std::string content = (*captured_stream)->GetCapturedString(); + + delete *captured_stream; + *captured_stream = NULL; + + return content; +} + +// Starts capturing stdout. +void CaptureStdout() { + CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout); +} + +// Starts capturing stderr. +void CaptureStderr() { + CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr); +} + +// Stops capturing stdout and returns the captured string. +std::string GetCapturedStdout() { + return GetCapturedStream(&g_captured_stdout); +} + +// Stops capturing stderr and returns the captured string. +std::string GetCapturedStderr() { + return GetCapturedStream(&g_captured_stderr); +} + +#endif // GTEST_HAS_STREAM_REDIRECTION + +#if GTEST_HAS_DEATH_TEST + +// A copy of all command line arguments. Set by InitGoogleTest(). +::std::vector g_argvs; + +static const ::std::vector* g_injected_test_argvs = + NULL; // Owned. 
+
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>* argvs) {
+  if (g_injected_test_argvs != argvs)
+    delete g_injected_test_argvs;
+  g_injected_test_argvs = argvs;
+}
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs() {
+  if (g_injected_test_argvs != NULL) {
+    return *g_injected_test_argvs;
+  }
+  return g_argvs;
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+  DebugBreak();
+  TerminateProcess(GetCurrentProcess(), 1);
+}
+}  // namespace posix
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag.  For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+  const std::string full_flag =
+      (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+  Message env_var;
+  for (size_t i = 0; i != full_flag.length(); i++) {
+    env_var << ToUpper(full_flag.c_str()[i]);
+  }
+
+  return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer.  If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+  // Parses the environment variable as a decimal integer.
+  char* end = NULL;
+  const long long_value = strtol(str, &end, 10);  // NOLINT
+
+  // Has strtol() consumed all characters in the string?
+  if (*end != '\0') {
+    // No - an invalid character was encountered.
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value \"" << str << "\".\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  // Is the parsed value in the range of an Int32?
+  // NOTE(review): the cast's template argument was stripped in transit;
+  // upstream narrows via static_cast<Int32>.
+  const Int32 result = static_cast<Int32>(long_value);
+  if (long_value == LONG_MAX || long_value == LONG_MIN ||
+      // The parsed value overflows as a long.  (strtol() returns
+      // LONG_MAX or LONG_MIN when the input overflows.)
+      result != long_value
+      // The parsed value overflows as an Int32.
+      ) {
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value " << str << ", which overflows.\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  *value = result;
+  return true;
+}
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = posix::GetEnv(env_var.c_str());
+  return string_value == NULL ?
+      default_value : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = posix::GetEnv(env_var.c_str());
+  if (string_value == NULL) {
+    // The environment variable is not set.
+    return default_value;
+  }
+
+  Int32 result = default_value;
+  if (!ParseInt32(Message() << "Environment variable " << env_var,
+                  string_value, &result)) {
+    printf("The default value %s is used.\n",
+           (Message() << default_value).GetString().c_str());
+    fflush(stdout);
+    return default_value;
+  }
+
+  return result;
+}
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromGTestEnv(const char* flag, const char* default_value) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value = posix::GetEnv(env_var.c_str());
+  return value == NULL ?
default_value : value; +} + +} // namespace internal +} // namespace testing +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// It uses the << operator when possible, and prints the bytes in the +// object otherwise. A user can override its behavior for a class +// type Foo by defining either operator<<(::std::ostream&, const Foo&) +// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that +// defines Foo. + +#include +#include +#include // NOLINT +#include + +namespace testing { + +namespace { + +using ::std::ostream; + +// Prints a segment of bytes in the given object. +void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start, + size_t count, ostream* os) { + char text[5] = ""; + for (size_t i = 0; i != count; i++) { + const size_t j = start + i; + if (i != 0) { + // Organizes the bytes into groups of 2 for easy parsing by + // human. + if ((j % 2) == 0) + *os << ' '; + else + *os << '-'; + } + GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]); + *os << text; + } +} + +// Prints the bytes in the given value to the given ostream. +void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count, + ostream* os) { + // Tells the user how big the object is. + *os << count << "-byte object <"; + + const size_t kThreshold = 132; + const size_t kChunkSize = 64; + // If the object size is bigger than kThreshold, we'll have to omit + // some details by printing only the first and the last kChunkSize + // bytes. + // TODO(wan): let the user control the threshold using a flag. + if (count < kThreshold) { + PrintByteSegmentInObjectTo(obj_bytes, 0, count, os); + } else { + PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os); + *os << " ... "; + // Rounds up to 2-byte boundary. 
+ const size_t resume_pos = (count - kChunkSize + 1)/2*2; + PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os); + } + *os << ">"; +} + +} // namespace + +namespace internal2 { + +// Delegates to PrintBytesInObjectToImpl() to print the bytes in the +// given object. The delegation simplifies the implementation, which +// uses the << operator and thus is easier done outside of the +// ::testing::internal namespace, which contains a << operator that +// sometimes conflicts with the one in STL. +void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count, + ostream* os) { + PrintBytesInObjectToImpl(obj_bytes, count, os); +} + +} // namespace internal2 + +namespace internal { + +// Depending on the value of a char (or wchar_t), we print it in one +// of three formats: +// - as is if it's a printable ASCII (e.g. 'a', '2', ' '), +// - as a hexidecimal escape sequence (e.g. '\x7F'), or +// - as a special escape sequence (e.g. '\r', '\n'). +enum CharFormat { + kAsIs, + kHexEscape, + kSpecialEscape +}; + +// Returns true if c is a printable ASCII character. We test the +// value of c directly instead of calling isprint(), which is buggy on +// Windows Mobile. +inline bool IsPrintableAscii(wchar_t c) { + return 0x20 <= c && c <= 0x7E; +} + +// Prints a wide or narrow char c as a character literal without the +// quotes, escaping it when necessary; returns how c was formatted. +// The template argument UnsignedChar is the unsigned version of Char, +// which is the type of c. 
+template +static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) { + switch (static_cast(c)) { + case L'\0': + *os << "\\0"; + break; + case L'\'': + *os << "\\'"; + break; + case L'\\': + *os << "\\\\"; + break; + case L'\a': + *os << "\\a"; + break; + case L'\b': + *os << "\\b"; + break; + case L'\f': + *os << "\\f"; + break; + case L'\n': + *os << "\\n"; + break; + case L'\r': + *os << "\\r"; + break; + case L'\t': + *os << "\\t"; + break; + case L'\v': + *os << "\\v"; + break; + default: + if (IsPrintableAscii(c)) { + *os << static_cast(c); + return kAsIs; + } else { + *os << "\\x" + String::FormatHexInt(static_cast(c)); + return kHexEscape; + } + } + return kSpecialEscape; +} + +// Prints a wchar_t c as if it's part of a string literal, escaping it when +// necessary; returns how c was formatted. +static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) { + switch (c) { + case L'\'': + *os << "'"; + return kAsIs; + case L'"': + *os << "\\\""; + return kSpecialEscape; + default: + return PrintAsCharLiteralTo(c, os); + } +} + +// Prints a char c as if it's part of a string literal, escaping it when +// necessary; returns how c was formatted. +static CharFormat PrintAsStringLiteralTo(char c, ostream* os) { + return PrintAsStringLiteralTo( + static_cast(static_cast(c)), os); +} + +// Prints a wide or narrow character c and its code. '\0' is printed +// as "'\\0'", other unprintable characters are also properly escaped +// using the standard C++ escape sequence. The template argument +// UnsignedChar is the unsigned version of Char, which is the type of c. +template +void PrintCharAndCodeTo(Char c, ostream* os) { + // First, print c as a literal in the most readable form we can find. + *os << ((sizeof(c) > 1) ? "L'" : "'"); + const CharFormat format = PrintAsCharLiteralTo(c, os); + *os << "'"; + + // To aid user debugging, we also print c's code in decimal, unless + // it's 0 (in which case c was printed as '\\0', making the code + // obvious). 
+ if (c == 0) + return; + *os << " (" << static_cast(c); + + // For more convenience, we print c's code again in hexidecimal, + // unless c was already printed in the form '\x##' or the code is in + // [1, 9]. + if (format == kHexEscape || (1 <= c && c <= 9)) { + // Do nothing. + } else { + *os << ", 0x" << String::FormatHexInt(static_cast(c)); + } + *os << ")"; +} + +void PrintTo(unsigned char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} +void PrintTo(signed char c, ::std::ostream* os) { + PrintCharAndCodeTo(c, os); +} + +// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its code. L'\0' is printed as "L'\\0'". +void PrintTo(wchar_t wc, ostream* os) { + PrintCharAndCodeTo(wc, os); +} + +// Prints the given array of characters to the ostream. CharType must be either +// char or wchar_t. +// The array starts at begin, the length is len, it may include '\0' characters +// and may not be NUL-terminated. +template +static void PrintCharsAsStringTo( + const CharType* begin, size_t len, ostream* os) { + const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\""; + *os << kQuoteBegin; + bool is_previous_hex = false; + for (size_t index = 0; index < len; ++index) { + const CharType cur = begin[index]; + if (is_previous_hex && IsXDigit(cur)) { + // Previous character is of '\x..' form and this character can be + // interpreted as another hexadecimal digit in its number. Break string to + // disambiguate. + *os << "\" " << kQuoteBegin; + } + is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape; + } + *os << "\""; +} + +// Prints a (const) char/wchar_t array of 'len' elements, starting at address +// 'begin'. CharType must be either char or wchar_t. +template +static void UniversalPrintCharArray( + const CharType* begin, size_t len, ostream* os) { + // The code + // const char kFoo[] = "foo"; + // generates an array of 4, not 3, elements, with the last one being '\0'. 
+ // + // Therefore when printing a char array, we don't print the last element if + // it's '\0', such that the output matches the string literal as it's + // written in the source code. + if (len > 0 && begin[len - 1] == '\0') { + PrintCharsAsStringTo(begin, len - 1, os); + return; + } + + // If, however, the last element in the array is not '\0', e.g. + // const char kFoo[] = { 'f', 'o', 'o' }; + // we must print the entire array. We also print a message to indicate + // that the array is not NUL-terminated. + PrintCharsAsStringTo(begin, len, os); + *os << " (no terminating NUL)"; +} + +// Prints a (const) char array of 'len' elements, starting at address 'begin'. +void UniversalPrintArray(const char* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints a (const) wchar_t array of 'len' elements, starting at address +// 'begin'. +void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints the given C string to the ostream. +void PrintTo(const char* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, strlen(s), os); + } +} + +// MSVC compiler can be configured to define whar_t as a typedef +// of unsigned short. Defining an overload for const wchar_t* in that case +// would cause pointers to unsigned shorts be printed as wide strings, +// possibly accessing more memory than intended and causing invalid +// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when +// wchar_t is implemented as a native type. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Prints the given wide C string to the ostream. +void PrintTo(const wchar_t* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, wcslen(s), os); + } +} +#endif // wchar_t is native + +// Prints a ::string object. 
+#if GTEST_HAS_GLOBAL_STRING
+void PrintStringTo(const ::string& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+// Prints a ::std::string object.
+void PrintStringTo(const ::std::string& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+
+// Prints a ::wstring object.
+#if GTEST_HAS_GLOBAL_WSTRING
+void PrintWideStringTo(const ::wstring& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
+  PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+}  // namespace internal
+
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// The Google C++ Testing Framework (Google Test)
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+// NOTE(review): upstream places #include "src/gtest-internal-inl.h" between
+// the define/undef pair; presumably the fused sources inline that header
+// elsewhere — confirm against the full file.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+using internal::GetUnitTestImpl;
+
+// Gets the summary of the failure message by omitting the stack trace
+// in it.
+std::string TestPartResult::ExtractSummary(const char* message) {
+  const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
+  return stack_trace == NULL ? message :
+      std::string(message, stack_trace);
+}
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
+  return os
+      << result.file_name() << ":" << result.line_number() << ": "
+      << (result.type() == TestPartResult::kSuccess ? "Success" :
+          result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
+          "Non-fatal failure") << ":\n"
+      << result.message() << std::endl;
+}
+
+// Appends a TestPartResult to the array.
+void TestPartResultArray::Append(const TestPartResult& result) {
+  array_.push_back(result);
+}
+
+// Returns the TestPartResult at the given index (0-based).
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
+  if (index < 0 || index >= size()) {
+    printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
+    internal::posix::Abort();
+  }
+
+  return array_[index];
+}
+
+// Returns the number of TestPartResult objects in the array.
+int TestPartResultArray::size() const {
+  return static_cast<int>(array_.size());
+}
+
+namespace internal {
+
+HasNewFatalFailureHelper::HasNewFatalFailureHelper()
+    : has_new_fatal_failure_(false),
+      original_reporter_(GetUnitTestImpl()->
+                         GetTestPartResultReporterForCurrentThread()) {
+  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
+}
+
+HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
+  GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
+      original_reporter_);
+}
+
+void HasNewFatalFailureHelper::ReportTestPartResult(
+    const TestPartResult& result) {
+  if (result.fatally_failed())
+    has_new_fatal_failure_ = true;
+  original_reporter_->ReportTestPartResult(result);
+}
+
+}  // namespace internal
+
+}  // namespace testing
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+  while (IsSpace(*str))
+    str++;
+  return str;
+}
+
+// Verifies that registered_tests match the test names in
+// defined_test_names_; returns registered_tests if successful, or
+// aborts the program otherwise.
+const char* TypedTestCasePState::VerifyRegisteredTestNames(
+    const char* file, int line, const char* registered_tests) {
+  typedef ::std::set<const char*>::const_iterator DefinedTestIter;
+  registered_ = true;
+
+  // Skip initial whitespace in registered_tests since some
+  // preprocessors prefix stringizied literals with whitespace.
+  registered_tests = SkipSpaces(registered_tests);
+
+  Message errors;
+  ::std::set<std::string> tests;
+  for (const char* names = registered_tests; names != NULL;
+       names = SkipComma(names)) {
+    const std::string name = GetPrefixUntilComma(names);
+    if (tests.count(name) != 0) {
+      errors << "Test " << name << " is listed more than once.\n";
+      continue;
+    }
+
+    bool found = false;
+    for (DefinedTestIter it = defined_test_names_.begin();
+         it != defined_test_names_.end();
+         ++it) {
+      if (name == *it) {
+        found = true;
+        break;
+      }
+    }
+
+    if (found) {
+      tests.insert(name);
+    } else {
+      errors << "No test named " << name
+             << " can be found in this test case.\n";
+    }
+  }
+
+  for (DefinedTestIter it = defined_test_names_.begin();
+       it != defined_test_names_.end();
+       ++it) {
+    if (tests.count(*it) == 0) {
+      errors << "You forgot to list test " << *it << ".\n";
+    }
+  }
+
+  const std::string& errors_str = errors.GetString();
+  if (errors_str != "") {
+    fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+            errors_str.c_str());
+    fflush(stderr);
+    posix::Abort();
+  }
+
+  return registered_tests;
+}
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+}  // namespace internal
+}  // namespace testing
diff --git a/lib/kokkos/tpls/gtest/gtest/gtest-test-part.h b/lib/kokkos/tpls/gtest/gtest/gtest-test-part.h
new file mode 120000
index 0000000000..48d39090f1
--- /dev/null
+++ b/lib/kokkos/tpls/gtest/gtest/gtest-test-part.h
@@ -0,0 +1 @@
+gtest.h
\ No newline at end of file
diff --git a/lib/kokkos/tpls/gtest/gtest/gtest.h b/lib/kokkos/tpls/gtest/gtest/gtest.h
new file mode 100644
index 0000000000..c74d098fa9
--- /dev/null
+++ b/lib/kokkos/tpls/gtest/gtest/gtest.h
@@ -0,0 +1,20065 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for Google Test. It should be +// included by any test program that uses Google Test. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. 
+// They are clearly marked by comments like this:
+//
+//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifdef __GNUC__
+#pragma GCC system_header
+#endif
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares functions and macros used internally by +// Google Test. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan) +// +// Low-level types and utilities for porting Google Test to various +// platforms. They are subject to change without notice. DO NOT USE +// THEM IN USER CODE. +// +// This file is fundamental to Google Test. All other Google Test source +// files are expected to #include this. Therefore, it cannot #include +// any other Google Test header. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +// The user can define the following macros in the build script to +// control Google Test's behavior. If the user doesn't define a macro +// in this list, Google Test will define it. +// +// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2) +// is/isn't available. +// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions +// are enabled. +// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::string, which is different to std::string). +// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::wstring, which is different to std::wstring). +// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular +// expressions are/aren't available. +// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that +// is/isn't available. 
+// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't +// enabled. +// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that +// std::wstring does/doesn't work (Google Test can +// be used where std::wstring is unavailable). +// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple +// is/isn't available. +// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the +// compiler supports Microsoft's "Structured +// Exception Handling". +// GTEST_HAS_STREAM_REDIRECTION +// - Define it to 1/0 to indicate whether the +// platform supports I/O stream redirection using +// dup() and dup2(). +// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google +// Test's own tr1 tuple implementation should be +// used. Unused when the user sets +// GTEST_HAS_TR1_TUPLE to 0. +// GTEST_LANG_CXX11 - Define it to 1/0 to indicate that Google Test +// is building in C++11/C++98 mode. +// GTEST_LINKED_AS_SHARED_LIBRARY +// - Define to 1 when compiling tests that use +// Google Test as a shared library (known as +// DLL on Windows). +// GTEST_CREATE_SHARED_LIBRARY +// - Define to 1 when compiling Google Test itself +// as a shared library. 
+ +// This header defines the following utilities: +// +// Macros indicating the current platform (defined to 1 if compiled on +// the given platform; otherwise undefined): +// GTEST_OS_AIX - IBM AIX +// GTEST_OS_CYGWIN - Cygwin +// GTEST_OS_HPUX - HP-UX +// GTEST_OS_LINUX - Linux +// GTEST_OS_LINUX_ANDROID - Google Android +// GTEST_OS_MAC - Mac OS X +// GTEST_OS_IOS - iOS +// GTEST_OS_IOS_SIMULATOR - iOS simulator +// GTEST_OS_NACL - Google Native Client (NaCl) +// GTEST_OS_OPENBSD - OpenBSD +// GTEST_OS_QNX - QNX +// GTEST_OS_SOLARIS - Sun Solaris +// GTEST_OS_SYMBIAN - Symbian +// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile) +// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop +// GTEST_OS_WINDOWS_MINGW - MinGW +// GTEST_OS_WINDOWS_MOBILE - Windows Mobile +// GTEST_OS_ZOS - z/OS +// +// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the +// most stable support. Since core members of the Google Test project +// don't have access to other platforms, support for them may be less +// stable. If you notice any problems on your platform, please notify +// googletestframework@googlegroups.com (patches for fixing them are +// even more welcome!). +// +// Note that it is possible that none of the GTEST_OS_* macros are defined. +// +// Macros indicating available Google Test features (defined to 1 if +// the corresponding feature is supported; otherwise undefined): +// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized +// tests) +// GTEST_HAS_DEATH_TEST - death tests +// GTEST_HAS_PARAM_TEST - value-parameterized tests +// GTEST_HAS_TYPED_TEST - typed tests +// GTEST_HAS_TYPED_TEST_P - type-parameterized tests +// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with +// GTEST_HAS_POSIX_RE (see above) which users can +// define themselves. +// GTEST_USES_SIMPLE_RE - our own simple regex is used; +// the above two are mutually exclusive. +// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ(). 
+// +// Macros for basic C++ coding: +// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. +// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a +// variable don't have to be used. +// GTEST_DISALLOW_ASSIGN_ - disables operator=. +// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. +// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. +// +// Synchronization: +// Mutex, MutexLock, ThreadLocal, GetThreadCount() +// - synchronization primitives. +// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above +// synchronization primitives have real implementations +// and Google Test is thread-safe; or 0 otherwise. +// +// Template meta programming: +// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only. +// IteratorTraits - partial implementation of std::iterator_traits, which +// is not available in libCstd when compiled with Sun C++. +// +// Smart pointers: +// scoped_ptr - as in TR2. +// +// Regular expressions: +// RE - a simple regular expression class using the POSIX +// Extended Regular Expression syntax on UNIX-like +// platforms, or a reduced regular exception syntax on +// other platforms, including Windows. +// +// Logging: +// GTEST_LOG_() - logs messages at the specified severity level. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. +// +// Stdout and stderr capturing: +// CaptureStdout() - starts capturing stdout. +// GetCapturedStdout() - stops capturing stdout and returns the captured +// string. +// CaptureStderr() - starts capturing stderr. +// GetCapturedStderr() - stops capturing stderr and returns the captured +// string. +// +// Integer types: +// TypeWithSize - maps an integer to a int type. +// Int32, UInt32, Int64, UInt64, TimeInMillis +// - integers of known sizes. +// BiggestInt - the biggest signed integer type. +// +// Command-line utilities: +// GTEST_FLAG() - references a flag. 
+// GTEST_DECLARE_*()  - declares a flag.
+// GTEST_DEFINE_*()   - defines a flag.
+// GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+// GetEnv()             - gets the value of an environment variable.
+// BoolFromGTestEnv()   - parses a bool environment variable.
+// Int32FromGTestEnv()  - parses an Int32 environment variable.
+// StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h>   // for isspace, etc
+#include <stddef.h>  // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif  // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <iostream>  // NOLINT
+#include <sstream>  // NOLINT
+#include <string>  // NOLINT
+
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+    (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif  // __GNUC__
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__ +# define GTEST_OS_CYGWIN 1 +#elif defined __SYMBIAN32__ +# define GTEST_OS_SYMBIAN 1 +#elif defined _WIN32 +# define GTEST_OS_WINDOWS 1 +# ifdef _WIN32_WCE +# define GTEST_OS_WINDOWS_MOBILE 1 +# elif defined(__MINGW__) || defined(__MINGW32__) +# define GTEST_OS_WINDOWS_MINGW 1 +# else +# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif // _WIN32_WCE +#elif defined __APPLE__ +# define GTEST_OS_MAC 1 +# if TARGET_OS_IPHONE +# define GTEST_OS_IOS 1 +# if TARGET_IPHONE_SIMULATOR +# define GTEST_OS_IOS_SIMULATOR 1 +# endif +# endif +#elif defined __linux__ +# define GTEST_OS_LINUX 1 +# if defined __ANDROID__ +# define GTEST_OS_LINUX_ANDROID 1 +# endif +#elif defined __MVS__ +# define GTEST_OS_ZOS 1 +#elif defined(__sun) && defined(__SVR4) +# define GTEST_OS_SOLARIS 1 +#elif defined(_AIX) +# define GTEST_OS_AIX 1 +#elif defined(__hpux) +# define GTEST_OS_HPUX 1 +#elif defined __native_client__ +# define GTEST_OS_NACL 1 +#elif defined __OpenBSD__ +# define GTEST_OS_OPENBSD 1 +#elif defined __QNX__ +# define GTEST_OS_QNX 1 +#endif // __CYGWIN__ + +#ifndef GTEST_LANG_CXX11 +// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when +// -std={c,gnu}++{0x,11} is passed. The C++11 standard specifies a +// value for __cplusplus, and recent versions of clang, gcc, and +// probably other compilers set that too in C++11 mode. +# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L +// Compiling in at least C++11 mode. +# define GTEST_LANG_CXX11 1 +# else +# define GTEST_LANG_CXX11 0 +# endif +#endif + +// Brings in definitions for functions used in the testing::internal::posix +// namespace (read, write, close, chdir, isatty, stat). We do not currently +// use them on Windows Mobile. +#if !GTEST_OS_WINDOWS +// This assumes that non-Windows OSes provide unistd.h. For OSes where this +// is not the case, we need to include headers that provide the functions +// mentioned above. 
+# include <unistd.h>
+# include <strings.h>
+#elif !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+#endif
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+# include <android/api-level.h>  // NOLINT
+#endif
+
+// Defines this to true iff Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# endif
+#endif
+
+#if GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise.  We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h>  // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows.  Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform.  Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif  // GTEST_HAS_POSIX_RE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+#  ifndef _HAS_EXCEPTIONS
+#   define _HAS_EXCEPTIONS 1
+#  endif  // _HAS_EXCEPTIONS
+#  define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions.  However, there is no compile-time way of
+// detecting whether they are enabled or not.  Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__IBMCPP__) && __EXCEPTIONS +// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__HP_aCC) +// Exception handling is in effect by default in HP aCC compiler. It has to +// be turned of by +noeh compiler option if desired. +# define GTEST_HAS_EXCEPTIONS 1 +# else +// For other compilers, we assume exceptions are disabled to be +// conservative. +# define GTEST_HAS_EXCEPTIONS 0 +# endif // defined(_MSC_VER) || defined(__BORLANDC__) +#endif // GTEST_HAS_EXCEPTIONS + +#if !defined(GTEST_HAS_STD_STRING) +// Even though we don't use this macro any longer, we keep it in case +// some clients still depend on it. +# define GTEST_HAS_STD_STRING 1 +#elif !GTEST_HAS_STD_STRING +// The user told us that ::std::string isn't available. +# error "Google Test cannot be used where ::std::string isn't available." +#endif // !defined(GTEST_HAS_STD_STRING) + +#ifndef GTEST_HAS_GLOBAL_STRING +// The user didn't tell us whether ::string is available, so we need +// to figure it out. + +# define GTEST_HAS_GLOBAL_STRING 0 + +#endif // GTEST_HAS_GLOBAL_STRING + +#ifndef GTEST_HAS_STD_WSTRING +// The user didn't tell us whether ::std::wstring is available, so we need +// to figure it out. +// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring +// is available. + +// Cygwin 1.7 and below doesn't support ::std::wstring. +// Solaris' libc++ doesn't support it either. Android has +// no support for it at least as recent as Froyo (2.2). +# define GTEST_HAS_STD_WSTRING \ + (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS)) + +#endif // GTEST_HAS_STD_WSTRING + +#ifndef GTEST_HAS_GLOBAL_WSTRING +// The user didn't tell us whether ::wstring is available, so we need +// to figure it out. +# define GTEST_HAS_GLOBAL_WSTRING \ + (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING) +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Determines whether RTTI is available. 
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+#  ifdef _CPPRTTI  // MSVC defines this macro iff RTTI is enabled.
+#   define GTEST_HAS_RTTI 1
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+#  ifdef __GXX_RTTI
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. Note sure if STL or toolchain bug,
+// so disable RTTI when detected.
+#   if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+       !defined(__EXCEPTIONS)
+#    define GTEST_HAS_RTTI 0
+#   else
+#    define GTEST_HAS_RTTI 1
+#   endif  // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif  // __GXX_RTTI
+
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends
+// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the
+// first version with C++ support.
+# elif defined(__clang__)
+
+#  define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+#  ifdef __RTTI_ALL__
+#   define GTEST_HAS_RTTI 1
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+#  define GTEST_HAS_RTTI 1
+
+# endif  // _MSC_VER
+
+#endif  // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we assume pthreads support is
+// available on Linux and Mac.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
+    || GTEST_OS_QNX)
+#endif  // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h>  // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h>  // NOLINT
+#endif
+
+// Determines whether Google Test can use tr1/tuple.  You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple with be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
+// STLport, provided with the Android NDK, has neither <tr1/tuple> or <tuple>.
+#  define GTEST_HAS_TR1_TUPLE 0
+# else
+// The user didn't tell us not to do it, so we assume it's OK.
+#  define GTEST_HAS_TR1_TUPLE 1
+# endif
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already.  At this time, libstdc++ 4.0.0+ and
+// MSVC 2010 are the only mainstream standard libraries that come
+// with a TR1 tuple implementation.  NVIDIA's CUDA NVCC compiler
+// pretends to be GCC by defining __GNUC__ and friends, but cannot
+// compile GCC's tuple implementation.  MSVC 2008 (9.0) provides TR1
+// tuple in a 323 MB Feature Pack download, which we cannot assume the
+// user has.  QNX's QCC compiler is a modified GCC but it doesn't
+// support TR1 tuple.  libc++ only provides std::tuple, in C++11 mode,
+// and it can be used with some compilers that define __GNUC__.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \ + && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600 +# define GTEST_ENV_HAS_TR1_TUPLE_ 1 +# endif + +// C++11 specifies that provides std::tuple. Use that if gtest is used +// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6 +// can build with clang but need to use gcc4.2's libstdc++). +# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325) +# define GTEST_ENV_HAS_STD_TUPLE_ 1 +# endif + +# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_ +# define GTEST_USE_OWN_TR1_TUPLE 0 +# else +# define GTEST_USE_OWN_TR1_TUPLE 1 +# endif + +#endif // GTEST_USE_OWN_TR1_TUPLE + +// To avoid conditional compilation everywhere, we make it +// gtest-port.h's responsibility to #include the header implementing +// tr1/tuple. +#if GTEST_HAS_TR1_TUPLE + +# if GTEST_USE_OWN_TR1_TUPLE +// This file was GENERATED by command: +// pump.py gtest-tuple.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2009 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility>  // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined).  This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+    template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+   private:
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<> +#define GTEST_1_TUPLE_(T) tuple +#define GTEST_2_TUPLE_(T) tuple +#define GTEST_3_TUPLE_(T) tuple +#define GTEST_4_TUPLE_(T) tuple +#define GTEST_5_TUPLE_(T) tuple +#define GTEST_6_TUPLE_(T) tuple +#define GTEST_7_TUPLE_(T) tuple +#define GTEST_8_TUPLE_(T) tuple +#define GTEST_9_TUPLE_(T) tuple +#define GTEST_10_TUPLE_(T) tuple + +// GTEST_n_TYPENAMES_(T) declares a list of n typenames. +#define GTEST_0_TYPENAMES_(T) +#define GTEST_1_TYPENAMES_(T) typename T##0 +#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1 +#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2 +#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3 +#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4 +#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5 +#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6 +#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, typename T##7 +#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8 +#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8, typename T##9 + +// In theory, defining stuff in the ::std namespace is undefined +// behavior. We can do this as we are playing the role of a standard +// library vendor. +namespace std { +namespace tr1 { + +template +class tuple; + +// Anything in namespace gtest_internal is Google Test's INTERNAL +// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code. 
+namespace gtest_internal { + +// ByRef::type is T if T is a reference; otherwise it's const T&. +template +struct ByRef { typedef const T& type; }; // NOLINT +template +struct ByRef { typedef T& type; }; // NOLINT + +// A handy wrapper for ByRef. +#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef::type + +// AddRef::type is T if T is a reference; otherwise it's T&. This +// is the same as tr1::add_reference::type. +template +struct AddRef { typedef T& type; }; // NOLINT +template +struct AddRef { typedef T& type; }; // NOLINT + +// A handy wrapper for AddRef. +#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef::type + +// A helper for implementing get(). +template class Get; + +// A helper for implementing tuple_element. kIndexValid is true +// iff k < the number of fields in tuple type T. +template +struct TupleElement; + +template +struct TupleElement { + typedef T0 type; +}; + +template +struct TupleElement { + typedef T1 type; +}; + +template +struct TupleElement { + typedef T2 type; +}; + +template +struct TupleElement { + typedef T3 type; +}; + +template +struct TupleElement { + typedef T4 type; +}; + +template +struct TupleElement { + typedef T5 type; +}; + +template +struct TupleElement { + typedef T6 type; +}; + +template +struct TupleElement { + typedef T7 type; +}; + +template +struct TupleElement { + typedef T8 type; +}; + +template +struct TupleElement { + typedef T9 type; +}; + +} // namespace gtest_internal + +template <> +class tuple<> { + public: + tuple() {} + tuple(const tuple& /* t */) {} + tuple& operator=(const tuple& /* t */) { return *this; } +}; + +template +class GTEST_1_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {} + + tuple(const tuple& t) : f0_(t.f0_) {} + + template + tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& 
operator=(const GTEST_1_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) { + f0_ = t.f0_; + return *this; + } + + T0 f0_; +}; + +template +class GTEST_2_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0), + f1_(f1) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {} + + template + tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {} + template + tuple(const ::std::pair& p) : f0_(p.first), f1_(p.second) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_2_TUPLE_(U)& t) { + return CopyFrom(t); + } + template + tuple& operator=(const ::std::pair& p) { + f0_ = p.first; + f1_ = p.second; + return *this; + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + return *this; + } + + T0 f0_; + T1 f1_; +}; + +template +class GTEST_3_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + template + tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_3_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; +}; + +template +class GTEST_4_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, 
GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {} + + template + tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_4_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; +}; + +template +class GTEST_5_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, + GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_) {} + + template + tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_5_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; +}; + +template +class GTEST_6_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5) {} + + tuple(const tuple& t) : f0_(t.f0_), 
f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_) {} + + template + tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_6_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; +}; + +template +class GTEST_7_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + template + tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_7_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; +}; + +template +class GTEST_8_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, 
GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, + GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + template + tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_8_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; +}; + +template +class GTEST_9_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7), f8_(f8) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + template + tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_9_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + 
f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; +}; + +template +class tuple { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(), + f9_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {} + + template + tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), + f9_(t.f9_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_10_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + f9_ = t.f9_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; + T9 f9_; +}; + +// 6.1.3.2 Tuple creation functions. + +// Known limitations: we don't support passing an +// std::tr1::reference_wrapper to make_tuple(). And we don't +// implement tie(). 
+ +inline tuple<> make_tuple() { return tuple<>(); } + +template +inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) { + return GTEST_1_TUPLE_(T)(f0); +} + +template +inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) { + return GTEST_2_TUPLE_(T)(f0, f1); +} + +template +inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) { + return GTEST_3_TUPLE_(T)(f0, f1, f2); +} + +template +inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3) { + return GTEST_4_TUPLE_(T)(f0, f1, f2, f3); +} + +template +inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4) { + return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4); +} + +template +inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5) { + return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5); +} + +template +inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6) { + return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6); +} + +template +inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) { + return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7); +} + +template +inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8) { + return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8); +} + +template +inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8, const T9& f9) { + return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9); +} + +// 6.1.3.3 Tuple helper classes. 
+ +template struct tuple_size; + +template +struct tuple_size { + static const int value = 0; +}; + +template +struct tuple_size { + static const int value = 1; +}; + +template +struct tuple_size { + static const int value = 2; +}; + +template +struct tuple_size { + static const int value = 3; +}; + +template +struct tuple_size { + static const int value = 4; +}; + +template +struct tuple_size { + static const int value = 5; +}; + +template +struct tuple_size { + static const int value = 6; +}; + +template +struct tuple_size { + static const int value = 7; +}; + +template +struct tuple_size { + static const int value = 8; +}; + +template +struct tuple_size { + static const int value = 9; +}; + +template +struct tuple_size { + static const int value = 10; +}; + +template +struct tuple_element { + typedef typename gtest_internal::TupleElement< + k < (tuple_size::value), k, Tuple>::type type; +}; + +#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element::type + +// 6.1.3.4 Element access. 
+ +namespace gtest_internal { + +template <> +class Get<0> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + Field(Tuple& t) { return t.f0_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + ConstField(const Tuple& t) { return t.f0_; } +}; + +template <> +class Get<1> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + Field(Tuple& t) { return t.f1_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + ConstField(const Tuple& t) { return t.f1_; } +}; + +template <> +class Get<2> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + Field(Tuple& t) { return t.f2_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + ConstField(const Tuple& t) { return t.f2_; } +}; + +template <> +class Get<3> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + Field(Tuple& t) { return t.f3_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + ConstField(const Tuple& t) { return t.f3_; } +}; + +template <> +class Get<4> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + Field(Tuple& t) { return t.f4_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + ConstField(const Tuple& t) { return t.f4_; } +}; + +template <> +class Get<5> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + Field(Tuple& t) { return t.f5_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + ConstField(const Tuple& t) { return t.f5_; } +}; + +template <> +class Get<6> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + Field(Tuple& t) { return t.f6_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + ConstField(const Tuple& t) { return t.f6_; } +}; + +template <> +class Get<7> { + public: + template + 
static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + Field(Tuple& t) { return t.f7_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + ConstField(const Tuple& t) { return t.f7_; } +}; + +template <> +class Get<8> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + Field(Tuple& t) { return t.f8_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + ConstField(const Tuple& t) { return t.f8_; } +}; + +template <> +class Get<9> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + Field(Tuple& t) { return t.f9_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + ConstField(const Tuple& t) { return t.f9_; } +}; + +} // namespace gtest_internal + +template +GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::Field(t); +} + +template +GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(const GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::ConstField(t); +} + +// 6.1.3.5 Relational operators + +// We only implement == and !=, as we don't have a need for the rest yet. + +namespace gtest_internal { + +// SameSizeTuplePrefixComparator::Eq(t1, t2) returns true if the +// first k fields of t1 equals the first k fields of t2. +// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if +// k1 != k2. 
+template +struct SameSizeTuplePrefixComparator; + +template <> +struct SameSizeTuplePrefixComparator<0, 0> { + template + static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) { + return true; + } +}; + +template +struct SameSizeTuplePrefixComparator { + template + static bool Eq(const Tuple1& t1, const Tuple2& t2) { + return SameSizeTuplePrefixComparator::Eq(t1, t2) && + ::std::tr1::get(t1) == ::std::tr1::get(t2); + } +}; + +} // namespace gtest_internal + +template +inline bool operator==(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { + return gtest_internal::SameSizeTuplePrefixComparator< + tuple_size::value, + tuple_size::value>::Eq(t, u); +} + +template +inline bool operator!=(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { return !(t == u); } + +// 6.1.4 Pairs. +// Unimplemented. + +} // namespace tr1 +} // namespace std + +#undef GTEST_0_TUPLE_ +#undef GTEST_1_TUPLE_ +#undef GTEST_2_TUPLE_ +#undef GTEST_3_TUPLE_ +#undef GTEST_4_TUPLE_ +#undef GTEST_5_TUPLE_ +#undef GTEST_6_TUPLE_ +#undef GTEST_7_TUPLE_ +#undef GTEST_8_TUPLE_ +#undef GTEST_9_TUPLE_ +#undef GTEST_10_TUPLE_ + +#undef GTEST_0_TYPENAMES_ +#undef GTEST_1_TYPENAMES_ +#undef GTEST_2_TYPENAMES_ +#undef GTEST_3_TYPENAMES_ +#undef GTEST_4_TYPENAMES_ +#undef GTEST_5_TYPENAMES_ +#undef GTEST_6_TYPENAMES_ +#undef GTEST_7_TYPENAMES_ +#undef GTEST_8_TYPENAMES_ +#undef GTEST_9_TYPENAMES_ +#undef GTEST_10_TYPENAMES_ + +#undef GTEST_DECLARE_TUPLE_AS_FRIEND_ +#undef GTEST_BY_REF_ +#undef GTEST_ADD_REF_ +#undef GTEST_TUPLE_ELEMENT_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +# elif GTEST_ENV_HAS_STD_TUPLE_ +# include +// C++11 puts its tuple into the ::std namespace rather than +// ::std::tr1. gtest expects tuple to live in ::std::tr1, so put it there. +// This causes undefined behavior, but supported compilers react in +// the way we intend. 
+namespace std { +namespace tr1 { +using ::std::get; +using ::std::make_tuple; +using ::std::tuple; +using ::std::tuple_element; +using ::std::tuple_size; +} +} + +# elif GTEST_OS_SYMBIAN + +// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to +// use STLport's tuple implementation, which unfortunately doesn't +// work as the copy of STLport distributed with Symbian is incomplete. +// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to +// use its own tuple implementation. +# ifdef BOOST_HAS_TR1_TUPLE +# undef BOOST_HAS_TR1_TUPLE +# endif // BOOST_HAS_TR1_TUPLE + +// This prevents , which defines +// BOOST_HAS_TR1_TUPLE, from being #included by Boost's . +# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED +# include + +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000) +// GCC 4.0+ implements tr1/tuple in the header. This does +// not conform to the TR1 spec, which requires the header to be . + +# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 +// Until version 4.3.2, gcc has a bug that causes , +// which is #included by , to not compile when RTTI is +// disabled. _TR1_FUNCTIONAL is the header guard for +// . Hence the following #define is a hack to prevent +// from being included. +# define _TR1_FUNCTIONAL 1 +# include +# undef _TR1_FUNCTIONAL // Allows the user to #include + // if he chooses to. +# else +# include // NOLINT +# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 + +# else +// If the compiler is not GCC 4.0+, we assume the user is using a +// spec-conforming TR1 implementation. +# include // NOLINT +# endif // GTEST_USE_OWN_TR1_TUPLE + +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether clone(2) is supported. +// Usually it will only be available on Linux, excluding +// Linux on the Itanium architecture. +// Also see http://linux.die.net/man/2/clone. +#ifndef GTEST_HAS_CLONE +// The user didn't tell us, so we need to figure it out. 
+ +# if GTEST_OS_LINUX && !defined(__ia64__) +# if GTEST_OS_LINUX_ANDROID +// On Android, clone() is only available on ARM starting with Gingerbread. +# if defined(__arm__) && __ANDROID_API__ >= 9 +# define GTEST_HAS_CLONE 1 +# else +# define GTEST_HAS_CLONE 0 +# endif +# else +# define GTEST_HAS_CLONE 1 +# endif +# else +# define GTEST_HAS_CLONE 0 +# endif // GTEST_OS_LINUX && !defined(__ia64__) + +#endif // GTEST_HAS_CLONE + +// Determines whether to support stream redirection. This is used to test +// output correctness and to implement death tests. +#ifndef GTEST_HAS_STREAM_REDIRECTION +// By default, we assume that stream redirection is supported on all +// platforms except known mobile ones. +# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN +# define GTEST_HAS_STREAM_REDIRECTION 0 +# else +# define GTEST_HAS_STREAM_REDIRECTION 1 +# endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN +#endif // GTEST_HAS_STREAM_REDIRECTION + +// Determines whether to support death tests. +// Google Test does not support death tests for VC 7.1 and earlier as +// abort() in a VC 7.1 application compiled as GUI in debug config +// pops up a dialog window that cannot be suppressed programmatically. +#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \ + (GTEST_OS_MAC && !GTEST_OS_IOS) || GTEST_OS_IOS_SIMULATOR || \ + (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \ + GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \ + GTEST_OS_OPENBSD || GTEST_OS_QNX) +# define GTEST_HAS_DEATH_TEST 1 +# include // NOLINT +#endif + +// We don't support MSVC 7.1 with exceptions disabled now. Therefore +// all the compilers we care about are adequate for supporting +// value-parameterized tests. +#define GTEST_HAS_PARAM_TEST 1 + +// Determines whether to support type-driven tests. + +// Typed tests need and variadic macros, which GCC, VC++ 8.0, +// Sun Pro CC, IBM Visual Age, and HP aCC support. 
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \ + defined(__IBMCPP__) || defined(__HP_aCC) +# define GTEST_HAS_TYPED_TEST 1 +# define GTEST_HAS_TYPED_TEST_P 1 +#endif + +// Determines whether to support Combine(). This only makes sense when +// value-parameterized tests are enabled. The implementation doesn't +// work on Sun Studio since it doesn't understand templated conversion +// operators. +#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC) +# define GTEST_HAS_COMBINE 1 +#endif + +// Determines whether the system compiler uses UTF-16 for encoding wide strings. +#define GTEST_WIDE_STRING_USES_UTF16_ \ + (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX) + +// Determines whether test results can be streamed to a socket. +#if GTEST_OS_LINUX +# define GTEST_CAN_STREAM_RESULTS_ 1 +#endif + +// Defines some utility macros. + +// The GNU compiler emits a warning if nested "if" statements are followed by +// an "else" statement and braces are not used to explicitly disambiguate the +// "else" binding. This leads to problems with code like: +// +// if (gate) +// ASSERT_*(condition) << "Some message"; +// +// The "switch (0) case 0:" idiom is used to suppress this. +#ifdef __INTEL_COMPILER +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ +#else +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT +#endif + +// Use this annotation at the end of a struct/class definition to +// prevent the compiler from optimizing away instances that are never +// used. This is useful when all interesting logic happens inside the +// c'tor and / or d'tor. Example: +// +// struct Foo { +// Foo() { ... } +// } GTEST_ATTRIBUTE_UNUSED_; +// +// Also use it after a variable or parameter declaration to tell the +// compiler the variable/parameter does not have to be used. 
+#if defined(__GNUC__) && !defined(COMPILER_ICC) +# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +#else +# define GTEST_ATTRIBUTE_UNUSED_ +#endif + +// A macro to disallow operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_ASSIGN_(type)\ + void operator=(type const &) + +// A macro to disallow copy constructor and operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\ + type(type const &);\ + GTEST_DISALLOW_ASSIGN_(type) + +// Tell the compiler to warn about unused return values for functions declared +// with this macro. The macro should be used on function declarations +// following the argument list: +// +// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_; +#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC) +# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result)) +#else +# define GTEST_MUST_USE_RESULT_ +#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC + +// Determine whether the compiler supports Microsoft's Structured Exception +// Handling. This is supported by several Windows compilers but generally +// does not exist on any other system. +#ifndef GTEST_HAS_SEH +// The user didn't tell us, so we need to figure it out. + +# if defined(_MSC_VER) || defined(__BORLANDC__) +// These two compilers are known to support SEH. +# define GTEST_HAS_SEH 1 +# else +// Assume no SEH. +# define GTEST_HAS_SEH 0 +# endif + +#endif // GTEST_HAS_SEH + +#ifdef _MSC_VER + +# if GTEST_LINKED_AS_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllimport) +# elif GTEST_CREATE_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllexport) +# endif + +#endif // _MSC_VER + +#ifndef GTEST_API_ +# define GTEST_API_ +#endif + +#ifdef __GNUC__ +// Ask the compiler to never inline a given function. 
+# define GTEST_NO_INLINE_ __attribute__((noinline)) +#else +# define GTEST_NO_INLINE_ +#endif + +// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project. +#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION) +# define GTEST_HAS_CXXABI_H_ 1 +#else +# define GTEST_HAS_CXXABI_H_ 0 +#endif + +namespace testing { + +class Message; + +namespace internal { + +// A secret type that Google Test users don't know about. It has no +// definition on purpose. Therefore it's impossible to create a +// Secret object, which is what we want. +class Secret; + +// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time +// expression is true. For example, you could use it to verify the +// size of a static array: +// +// GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES, +// content_type_names_incorrect_size); +// +// or to make sure a struct is smaller than a certain size: +// +// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large); +// +// The second argument to the macro is the name of the variable. If +// the expression is false, most compilers will issue a warning/error +// containing the name of the variable. + +template +struct CompileAssert { +}; + +#define GTEST_COMPILE_ASSERT_(expr, msg) \ + typedef ::testing::internal::CompileAssert<(static_cast(expr))> \ + msg[static_cast(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_ + +// Implementation details of GTEST_COMPILE_ASSERT_: +// +// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1 +// elements (and thus is invalid) when the expression is false. +// +// - The simpler definition +// +// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1] +// +// does not work, as gcc supports variable-length arrays whose sizes +// are determined at run-time (this is gcc's extension and not part +// of the C++ standard). 
As a result, gcc fails to reject the +// following code with the simple definition: +// +// int foo; +// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is +// // not a compile-time constant. +// +// - By using the type CompileAssert<(bool(expr))>, we ensures that +// expr is a compile-time constant. (Template arguments must be +// determined at compile-time.) +// +// - The outter parentheses in CompileAssert<(bool(expr))> are necessary +// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written +// +// CompileAssert +// +// instead, these compilers will refuse to compile +// +// GTEST_COMPILE_ASSERT_(5 > 0, some_message); +// +// (They seem to think the ">" in "5 > 0" marks the end of the +// template argument list.) +// +// - The array size is (bool(expr) ? 1 : -1), instead of simply +// +// ((expr) ? 1 : -1). +// +// This is to avoid running into a bug in MS VC 7.1, which +// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1. + +// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h. +// +// This template is declared, but intentionally undefined. +template +struct StaticAssertTypeEqHelper; + +template +struct StaticAssertTypeEqHelper {}; + +#if GTEST_HAS_GLOBAL_STRING +typedef ::string string; +#else +typedef ::std::string string; +#endif // GTEST_HAS_GLOBAL_STRING + +#if GTEST_HAS_GLOBAL_WSTRING +typedef ::wstring wstring; +#elif GTEST_HAS_STD_WSTRING +typedef ::std::wstring wstring; +#endif // GTEST_HAS_GLOBAL_WSTRING + +// A helper for suppressing warnings on constant condition. It just +// returns 'condition'. +GTEST_API_ bool IsTrue(bool condition); + +// Defines scoped_ptr. + +// This implementation of scoped_ptr is PARTIAL - it only contains +// enough stuff to satisfy Google Test's need. 
+template +class scoped_ptr { + public: + typedef T element_type; + + explicit scoped_ptr(T* p = NULL) : ptr_(p) {} + ~scoped_ptr() { reset(); } + + T& operator*() const { return *ptr_; } + T* operator->() const { return ptr_; } + T* get() const { return ptr_; } + + T* release() { + T* const ptr = ptr_; + ptr_ = NULL; + return ptr; + } + + void reset(T* p = NULL) { + if (p != ptr_) { + if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type. + delete ptr_; + } + ptr_ = p; + } + } + + private: + T* ptr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr); +}; + +// Defines RE. + +// A simple C++ wrapper for . It uses the POSIX Extended +// Regular Expression syntax. +class GTEST_API_ RE { + public: + // A copy constructor is required by the Standard to initialize object + // references from r-values. + RE(const RE& other) { Init(other.pattern()); } + + // Constructs an RE from a string. + RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT + +#if GTEST_HAS_GLOBAL_STRING + + RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT + +#endif // GTEST_HAS_GLOBAL_STRING + + RE(const char* regex) { Init(regex); } // NOLINT + ~RE(); + + // Returns the string representation of the regex. + const char* pattern() const { return pattern_; } + + // FullMatch(str, re) returns true iff regular expression re matches + // the entire str. + // PartialMatch(str, re) returns true iff regular expression re + // matches a substring of str (including str itself). + // + // TODO(wan@google.com): make FullMatch() and PartialMatch() work + // when str contains NUL characters. 
+ static bool FullMatch(const ::std::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::std::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#if GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const ::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#endif // GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const char* str, const RE& re); + static bool PartialMatch(const char* str, const RE& re); + + private: + void Init(const char* regex); + + // We use a const char* instead of an std::string, as Google Test used to be + // used where std::string is not available. TODO(wan@google.com): change to + // std::string. + const char* pattern_; + bool is_valid_; + +#if GTEST_USES_POSIX_RE + + regex_t full_regex_; // For FullMatch(). + regex_t partial_regex_; // For PartialMatch(). + +#else // GTEST_USES_SIMPLE_RE + + const char* full_pattern_; // For FullMatch(); + +#endif + + GTEST_DISALLOW_ASSIGN_(RE); +}; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line); + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, + int line); + +// Defines logging utilities: +// GTEST_LOG_(severity) - logs messages at the specified severity level. The +// message itself is streamed into the macro. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. 
+ +enum GTestLogSeverity { + GTEST_INFO, + GTEST_WARNING, + GTEST_ERROR, + GTEST_FATAL +}; + +// Formats log entry severity, provides a stream object for streaming the +// log message, and terminates the message with a newline when going out of +// scope. +class GTEST_API_ GTestLog { + public: + GTestLog(GTestLogSeverity severity, const char* file, int line); + + // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. + ~GTestLog(); + + ::std::ostream& GetStream() { return ::std::cerr; } + + private: + const GTestLogSeverity severity_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); +}; + +#define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__).GetStream() + +inline void LogToStderr() {} +inline void FlushInfoLog() { fflush(NULL); } + +// INTERNAL IMPLEMENTATION - DO NOT USE. +// +// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition +// is not satisfied. +// Synopsys: +// GTEST_CHECK_(boolean_condition); +// or +// GTEST_CHECK_(boolean_condition) << "Additional message"; +// +// This checks the condition and if the condition is not satisfied +// it prints message about the condition violation, including the +// condition itself, plus additional message streamed into it, if any, +// and then it aborts the program. It aborts the program irrespective of +// whether it is built in the debug mode or not. +#define GTEST_CHECK_(condition) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::IsTrue(condition)) \ + ; \ + else \ + GTEST_LOG_(FATAL) << "Condition " #condition " failed. " + +// An all-mode assert to verify that the given POSIX-style function +// call returns 0 (indicating success). Known limitation: this +// doesn't expand to a balanced 'if' statement, so enclose the macro +// in {} if you need to use it as the only statement in an 'if' +// branch. 
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \ + if (const int gtest_error = (posix_call)) \ + GTEST_LOG_(FATAL) << #posix_call << "failed with error " \ + << gtest_error + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Use ImplicitCast_ as a safe version of static_cast for upcasting in +// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a +// const Foo*). When you use ImplicitCast_, the compiler checks that +// the cast is safe. Such explicit ImplicitCast_s are necessary in +// surprisingly many situations where C++ demands an exact type match +// instead of an argument type convertable to a target type. +// +// The syntax for using ImplicitCast_ is the same as for static_cast: +// +// ImplicitCast_(expr) +// +// ImplicitCast_ would have been part of the C++ standard library, +// but the proposal was submitted too late. It will probably make +// its way into the language in the future. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., implicit_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template +inline To ImplicitCast_(To x) { return x; } + +// When you upcast (that is, cast a pointer from type Foo to type +// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts +// always succeed. When you downcast (that is, cast a pointer from +// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because +// how do you know the pointer is really of type SubclassOfFoo? It +// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus, +// when you downcast, you should use this macro. In debug mode, we +// use dynamic_cast<> to double-check the downcast is legal (we die +// if it's not). In normal mode, we do the efficient static_cast<> +// instead. Thus, it's important to test in debug mode to make sure +// the cast is legal! +// This is the only place in the code we should use dynamic_cast<>. 
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to +// do RTTI (eg code like this: +// if (dynamic_cast(foo)) HandleASubclass1Object(foo); +// if (dynamic_cast(foo)) HandleASubclass2Object(foo); +// You should design the code some other way not to need this. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., down_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers + // Ensures that To is a sub-type of From *. This test is here only + // for compile-time type checking, and has no overhead in an + // optimized build at run-time, as it will be optimized away + // completely. + if (false) { + const To to = NULL; + ::testing::internal::ImplicitCast_(to); + } + +#if GTEST_HAS_RTTI + // RTTI: debug mode only! + GTEST_CHECK_(f == NULL || dynamic_cast(f) != NULL); +#endif + return static_cast(f); +} + +// Downcasts the pointer of type Base to Derived. +// Derived must be a subclass of Base. The parameter MUST +// point to a class of type Derived, not any subclass of it. +// When RTTI is available, the function performs a runtime +// check to enforce this. +template +Derived* CheckedDowncastToActualType(Base* base) { +#if GTEST_HAS_RTTI + GTEST_CHECK_(typeid(*base) == typeid(Derived)); + return dynamic_cast(base); // NOLINT +#else + return static_cast(base); // Poor man's downcast. +#endif +} + +#if GTEST_HAS_STREAM_REDIRECTION + +// Defines the stderr capturer: +// CaptureStdout - starts capturing stdout. +// GetCapturedStdout - stops capturing stdout and returns the captured string. +// CaptureStderr - starts capturing stderr. +// GetCapturedStderr - stops capturing stderr and returns the captured string. 
+// +GTEST_API_ void CaptureStdout(); +GTEST_API_ std::string GetCapturedStdout(); +GTEST_API_ void CaptureStderr(); +GTEST_API_ std::string GetCapturedStderr(); + +#endif // GTEST_HAS_STREAM_REDIRECTION + + +#if GTEST_HAS_DEATH_TEST + +const ::std::vector& GetInjectableArgvs(); +void SetInjectableArgvs(const ::std::vector* + new_argvs); + +// A copy of all command line arguments. Set by InitGoogleTest(). +extern ::std::vector g_argvs; + +#endif // GTEST_HAS_DEATH_TEST + +// Defines synchronization primitives. + +#if GTEST_HAS_PTHREAD + +// Sleeps for (roughly) n milli-seconds. This function is only for +// testing Google Test's own constructs. Don't use it in user tests, +// either directly or indirectly. +inline void SleepMilliseconds(int n) { + const timespec time = { + 0, // 0 seconds. + n * 1000L * 1000L, // And n ms. + }; + nanosleep(&time, NULL); +} + +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class Notification { + public: + Notification() : notified_(false) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + } + ~Notification() { + pthread_mutex_destroy(&mutex_); + } + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { + pthread_mutex_lock(&mutex_); + notified_ = true; + pthread_mutex_unlock(&mutex_); + } + + // Blocks until the controller thread notifies. Must be called from a test + // thread. 
+ void WaitForNotification() { + for (;;) { + pthread_mutex_lock(&mutex_); + const bool notified = notified_; + pthread_mutex_unlock(&mutex_); + if (notified) + break; + SleepMilliseconds(10); + } + } + + private: + pthread_mutex_t mutex_; + bool notified_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; + +// As a C-function, ThreadFuncWithCLinkage cannot be templated itself. +// Consequently, it cannot select a correct instantiation of ThreadWithParam +// in order to call its Run(). Introducing ThreadWithParamBase as a +// non-templated base class for ThreadWithParam allows us to bypass this +// problem. +class ThreadWithParamBase { + public: + virtual ~ThreadWithParamBase() {} + virtual void Run() = 0; +}; + +// pthread_create() accepts a pointer to a function type with the C linkage. +// According to the Standard (7.5/1), function types with different linkages +// are different even if they are otherwise identical. Some compilers (for +// example, SunStudio) treat them as different types. Since class methods +// cannot be defined with C-linkage we need to define a free C-function to +// pass into pthread_create(). +extern "C" inline void* ThreadFuncWithCLinkage(void* thread) { + static_cast(thread)->Run(); + return NULL; +} + +// Helper class for testing Google Test's multi-threading constructs. +// To use it, write: +// +// void ThreadFunc(int param) { /* Do things with param */ } +// Notification thread_can_start; +// ... +// // The thread_can_start parameter is optional; you can supply NULL. +// ThreadWithParam thread(&ThreadFunc, 5, &thread_can_start); +// thread_can_start.Notify(); +// +// These classes are only for testing Google Test's own constructs. Do +// not use them in user tests, either directly or indirectly. 
+template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void (*UserThreadFunc)(T); + + ThreadWithParam( + UserThreadFunc func, T param, Notification* thread_can_start) + : func_(func), + param_(param), + thread_can_start_(thread_can_start), + finished_(false) { + ThreadWithParamBase* const base = this; + // The thread can be created only after all fields except thread_ + // have been initialized. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base)); + } + ~ThreadWithParam() { Join(); } + + void Join() { + if (!finished_) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0)); + finished_ = true; + } + } + + virtual void Run() { + if (thread_can_start_ != NULL) + thread_can_start_->WaitForNotification(); + func_(param_); + } + + private: + const UserThreadFunc func_; // User-supplied thread function. + const T param_; // User-supplied parameter to the thread function. + // When non-NULL, used to block execution until the controller thread + // notifies. + Notification* const thread_can_start_; + bool finished_; // true iff we know that the thread function has finished. + pthread_t thread_; // The native thread object. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; + +// MutexBase and Mutex implement mutex on pthreads-based platforms. They +// are used in conjunction with class MutexLock: +// +// Mutex mutex; +// ... +// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end +// // of the current scope. +// +// MutexBase implements behavior for both statically and dynamically +// allocated mutexes. Do not use MutexBase directly. Instead, write +// the following to define a static mutex: +// +// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex); +// +// You can forward declare a static mutex like this: +// +// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex); +// +// To create a dynamic mutex, just define an object of type Mutex. +class MutexBase { + public: + // Acquires this mutex. 
+ void Lock() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_)); + owner_ = pthread_self(); + has_owner_ = true; + } + + // Releases this mutex. + void Unlock() { + // Since the lock is being released the owner_ field should no longer be + // considered valid. We don't protect writing to has_owner_ here, as it's + // the caller's responsibility to ensure that the current thread holds the + // mutex when this is called. + has_owner_ = false; + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_)); + } + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. + void AssertHeld() const { + GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self())) + << "The current thread is not holding the mutex @" << this; + } + + // A static mutex may be used before main() is entered. It may even + // be used before the dynamic initialization stage. Therefore we + // must be able to initialize a static mutex object at link time. + // This means MutexBase has to be a POD and its member variables + // have to be public. + public: + pthread_mutex_t mutex_; // The underlying pthread mutex. + // has_owner_ indicates whether the owner_ field below contains a valid thread + // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All + // accesses to the owner_ field should be protected by a check of this field. + // An alternative might be to memset() owner_ to all zeros, but there's no + // guarantee that a zero'd pthread_t is necessarily invalid or even different + // from pthread_self(). + bool has_owner_; + pthread_t owner_; // The thread holding the mutex. +}; + +// Forward-declares a static mutex. +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::MutexBase mutex + +// Defines and statically (i.e. at link time) initializes a static mutex. 
+// The initialization list here does not explicitly initialize each field, +// instead relying on default initialization for the unspecified fields. In +// particular, the owner_ field (a pthread_t) is not explicitly initialized. +// This allows initialization to work whether pthread_t is a scalar or struct. +// The flag -Wmissing-field-initializers must not be specified for this to work. +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false } + +// The Mutex class can only be used for mutexes created at runtime. It +// shares its API with MutexBase otherwise. +class Mutex : public MutexBase { + public: + Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + has_owner_ = false; + } + ~Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +// We cannot name this class MutexLock as the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(MutexBase* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + MutexBase* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Helpers for ThreadLocal. + +// pthread_key_create() requires DeleteThreadLocalValue() to have +// C-linkage. Therefore it cannot be templatized to access +// ThreadLocal. Hence the need for class +// ThreadLocalValueHolderBase. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Called by pthread to delete thread-local data stored by +// pthread_setspecific(). +extern "C" inline void DeleteThreadLocalValue(void* value_holder) { + delete static_cast(value_holder); +} + +// Implements thread-local storage on pthreads-based systems. 
+// +// // Thread 1 +// ThreadLocal tl(100); // 100 is the default value for each thread. +// +// // Thread 2 +// tl.set(150); // Changes the value for thread 2 only. +// EXPECT_EQ(150, tl.get()); +// +// // Thread 1 +// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value. +// tl.set(200); +// EXPECT_EQ(200, tl.get()); +// +// The template type argument T must have a public copy constructor. +// In addition, the default ThreadLocal constructor requires T to have +// a public default constructor. +// +// An object managed for a thread by a ThreadLocal instance is deleted +// when the thread exits. Or, if the ThreadLocal instance dies in +// that thread, when the ThreadLocal dies. It's the user's +// responsibility to ensure that all other threads using a ThreadLocal +// have exited when it dies, or the per-thread objects for those +// threads will not be deleted. +// +// Google Test only uses global ThreadLocal objects. That means they +// will die after main() has returned. Therefore, no per-thread +// object managed by Google Test will be leaked as long as all threads +// using Google Test have exited when main() returns. +template +class ThreadLocal { + public: + ThreadLocal() : key_(CreateKey()), + default_() {} + explicit ThreadLocal(const T& value) : key_(CreateKey()), + default_(value) {} + + ~ThreadLocal() { + // Destroys the managed object for the current thread, if any. + DeleteThreadLocalValue(pthread_getspecific(key_)); + + // Releases resources associated with the key. This will *not* + // delete managed objects for other threads. + GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_)); + } + + T* pointer() { return GetOrCreateValue(); } + const T* pointer() const { return GetOrCreateValue(); } + const T& get() const { return *pointer(); } + void set(const T& value) { *pointer() = value; } + + private: + // Holds a value of type T. 
+ class ValueHolder : public ThreadLocalValueHolderBase { + public: + explicit ValueHolder(const T& value) : value_(value) {} + + T* pointer() { return &value_; } + + private: + T value_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + }; + + static pthread_key_t CreateKey() { + pthread_key_t key; + // When a thread exits, DeleteThreadLocalValue() will be called on + // the object managed for that thread. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_key_create(&key, &DeleteThreadLocalValue)); + return key; + } + + T* GetOrCreateValue() const { + ThreadLocalValueHolderBase* const holder = + static_cast(pthread_getspecific(key_)); + if (holder != NULL) { + return CheckedDowncastToActualType(holder)->pointer(); + } + + ValueHolder* const new_holder = new ValueHolder(default_); + ThreadLocalValueHolderBase* const holder_base = new_holder; + GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base)); + return new_holder->pointer(); + } + + // A key pthreads uses for looking up per-thread values. + const pthread_key_t key_; + const T default_; // The default value for each thread. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); +}; + +# define GTEST_IS_THREADSAFE 1 + +#else // GTEST_HAS_PTHREAD + +// A dummy implementation of synchronization primitives (mutex, lock, +// and thread-local variable). Necessary for compiling Google Test where +// mutex is not supported - using Google Test in multiple threads is not +// supported on such platforms. 
+ +class Mutex { + public: + Mutex() {} + void Lock() {} + void Unlock() {} + void AssertHeld() const {} +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex + +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex*) {} // NOLINT +}; + +typedef GTestMutexLock MutexLock; + +template +class ThreadLocal { + public: + ThreadLocal() : value_() {} + explicit ThreadLocal(const T& value) : value_(value) {} + T* pointer() { return &value_; } + const T* pointer() const { return &value_; } + const T& get() const { return value_; } + void set(const T& value) { value_ = value; } + private: + T value_; +}; + +// The above synchronization primitives have dummy implementations. +// Therefore Google Test is not thread-safe. +# define GTEST_IS_THREADSAFE 0 + +#endif // GTEST_HAS_PTHREAD + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +GTEST_API_ size_t GetThreadCount(); + +// Passing non-POD classes through ellipsis (...) crashes the ARM +// compiler and generates a warning in Sun Studio. The Nokia Symbian +// and the IBM XL C/C++ compiler try to instantiate a copy constructor +// for objects passed through ellipsis (...), failing for uncopyable +// objects. We define this to ensure that only POD is passed through +// ellipsis on these systems. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC) +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_ELLIPSIS_NEEDS_POD_ 1 +#else +# define GTEST_CAN_COMPARE_NULL 1 +#endif + +// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between +// const T& and const T* in a function template. These compilers +// _can_ decide between class template specializations for T and T*, +// so a tr1::type_traits-like is_pointer works. 
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) +# define GTEST_NEEDS_IS_POINTER_ 1 +#endif + +template +struct bool_constant { + typedef bool_constant type; + static const bool value = bool_value; +}; +template const bool bool_constant::value; + +typedef bool_constant false_type; +typedef bool_constant true_type; + +template +struct is_pointer : public false_type {}; + +template +struct is_pointer : public true_type {}; + +template +struct IteratorTraits { + typedef typename Iterator::value_type value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_SEP_ "\\" +# define GTEST_HAS_ALT_PATH_SEP_ 1 +// The biggest signed integer type the compiler supports. +typedef __int64 BiggestInt; +#else +# define GTEST_PATH_SEP_ "/" +# define GTEST_HAS_ALT_PATH_SEP_ 0 +typedef long long BiggestInt; // NOLINT +#endif // GTEST_OS_WINDOWS + +// Utilities for char. + +// isspace(int ch) and friends accept an unsigned char or EOF. char +// may be signed, depending on the compiler (or compiler flags). +// Therefore we need to cast a char to unsigned char before calling +// isspace(), etc. 
+ +inline bool IsAlpha(char ch) { + return isalpha(static_cast(ch)) != 0; +} +inline bool IsAlNum(char ch) { + return isalnum(static_cast(ch)) != 0; +} +inline bool IsDigit(char ch) { + return isdigit(static_cast(ch)) != 0; +} +inline bool IsLower(char ch) { + return islower(static_cast(ch)) != 0; +} +inline bool IsSpace(char ch) { + return isspace(static_cast(ch)) != 0; +} +inline bool IsUpper(char ch) { + return isupper(static_cast(ch)) != 0; +} +inline bool IsXDigit(char ch) { + return isxdigit(static_cast(ch)) != 0; +} +inline bool IsXDigit(wchar_t ch) { + const unsigned char low_byte = static_cast(ch); + return ch == low_byte && isxdigit(low_byte) != 0; +} + +inline char ToLower(char ch) { + return static_cast(tolower(static_cast(ch))); +} +inline char ToUpper(char ch) { + return static_cast(toupper(static_cast(ch))); +} + +// The testing::internal::posix namespace holds wrappers for common +// POSIX functions. These wrappers hide the differences between +// Windows/MSVC and POSIX systems. Since some compilers define these +// standard functions as macros, the wrapper cannot have the same name +// as the wrapped function. + +namespace posix { + +// Functions with a different name on Windows. 
+ +#if GTEST_OS_WINDOWS + +typedef struct _stat StatStruct; + +# ifdef __BORLANDC__ +inline int IsATTY(int fd) { return isatty(fd); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +# else // !__BORLANDC__ +# if GTEST_OS_WINDOWS_MOBILE +inline int IsATTY(int /* fd */) { return 0; } +# else +inline int IsATTY(int fd) { return _isatty(fd); } +# endif // GTEST_OS_WINDOWS_MOBILE +inline int StrCaseCmp(const char* s1, const char* s2) { + return _stricmp(s1, s2); +} +inline char* StrDup(const char* src) { return _strdup(src); } +# endif // __BORLANDC__ + +# if GTEST_OS_WINDOWS_MOBILE +inline int FileNo(FILE* file) { return reinterpret_cast(_fileno(file)); } +// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this +// time and thus not defined there. +# else +inline int FileNo(FILE* file) { return _fileno(file); } +inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); } +inline int RmDir(const char* dir) { return _rmdir(dir); } +inline bool IsDir(const StatStruct& st) { + return (_S_IFDIR & st.st_mode) != 0; +} +# endif // GTEST_OS_WINDOWS_MOBILE + +#else + +typedef struct stat StatStruct; + +inline int FileNo(FILE* file) { return fileno(file); } +inline int IsATTY(int fd) { return isatty(fd); } +inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); } +inline int StrCaseCmp(const char* s1, const char* s2) { + return strcasecmp(s1, s2); +} +inline char* StrDup(const char* src) { return strdup(src); } +inline int RmDir(const char* dir) { return rmdir(dir); } +inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); } + +#endif // GTEST_OS_WINDOWS + +// Functions deprecated by MSVC 8.0. + +#ifdef _MSC_VER +// Temporarily disable warning 4996 (deprecated function). 
+# pragma warning(push) +# pragma warning(disable:4996) +#endif + +inline const char* StrNCpy(char* dest, const char* src, size_t n) { + return strncpy(dest, src, n); +} + +// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and +// StrError() aren't needed on Windows CE at this time and thus not +// defined there. + +#if !GTEST_OS_WINDOWS_MOBILE +inline int ChDir(const char* dir) { return chdir(dir); } +#endif +inline FILE* FOpen(const char* path, const char* mode) { + return fopen(path, mode); +} +#if !GTEST_OS_WINDOWS_MOBILE +inline FILE *FReopen(const char* path, const char* mode, FILE* stream) { + return freopen(path, mode, stream); +} +inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); } +#endif +inline int FClose(FILE* fp) { return fclose(fp); } +#if !GTEST_OS_WINDOWS_MOBILE +inline int Read(int fd, void* buf, unsigned int count) { + return static_cast(read(fd, buf, count)); +} +inline int Write(int fd, const void* buf, unsigned int count) { + return static_cast(write(fd, buf, count)); +} +inline int Close(int fd) { return close(fd); } +inline const char* StrError(int errnum) { return strerror(errnum); } +#endif +inline const char* GetEnv(const char* name) { +#if GTEST_OS_WINDOWS_MOBILE + // We are on Windows CE, which has no environment variables. + return NULL; +#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9) + // Environment variables which we programmatically clear will be set to the + // empty string rather than unset (NULL). Handle that case. + const char* const env = getenv(name); + return (env != NULL && env[0] != '\0') ? env : NULL; +#else + return getenv(name); +#endif +} + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + +#if GTEST_OS_WINDOWS_MOBILE +// Windows CE has no C library. The abort() function is used in +// several places in Google Test. This implementation provides a reasonable +// imitation of standard behaviour. 
+void Abort(); +#else +inline void Abort() { abort(); } +#endif // GTEST_OS_WINDOWS_MOBILE + +} // namespace posix + +// MSVC "deprecates" snprintf and issues warnings wherever it is used. In +// order to avoid these warnings, we need to use _snprintf or _snprintf_s on +// MSVC-based platforms. We map the GTEST_SNPRINTF_ macro to the appropriate +// function in order to achieve that. We use macro definition here because +// snprintf is a variadic function. +#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE +// MSVC 2005 and above support variadic macros. +# define GTEST_SNPRINTF_(buffer, size, format, ...) \ + _snprintf_s(buffer, size, size, format, __VA_ARGS__) +#elif defined(_MSC_VER) +// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't +// complain about _snprintf. +# define GTEST_SNPRINTF_ _snprintf +#else +# define GTEST_SNPRINTF_ snprintf +#endif + +// The maximum number a BiggestInt can represent. This definition +// works no matter BiggestInt is represented in one's complement or +// two's complement. +// +// We cannot rely on numeric_limits in STL, as __int64 and long long +// are not part of standard C++ and numeric_limits doesn't need to be +// defined for them. +const BiggestInt kMaxBiggestInt = + ~(static_cast(1) << (8*sizeof(BiggestInt) - 1)); + +// This template class serves as a compile-time function from size to +// type. It maps a size in bytes to a primitive type with that +// size. e.g. +// +// TypeWithSize<4>::UInt +// +// is typedef-ed to be unsigned int (unsigned integer made up of 4 +// bytes). +// +// Such functionality should belong to STL, but I cannot find it +// there. +// +// Google Test uses this class in the implementation of floating-point +// comparison. +// +// For now it only handles UInt (unsigned int) as that's all Google Test +// needs. Other types can be easily added in the future if need +// arises. 
+template +class TypeWithSize { + public: + // This prevents the user from using TypeWithSize with incorrect + // values of N. + typedef void UInt; +}; + +// The specialization for size 4. +template <> +class TypeWithSize<4> { + public: + // unsigned int has size 4 in both gcc and MSVC. + // + // As base/basictypes.h doesn't compile on Windows, we cannot use + // uint32, uint64, and etc here. + typedef int Int; + typedef unsigned int UInt; +}; + +// The specialization for size 8. +template <> +class TypeWithSize<8> { + public: +#if GTEST_OS_WINDOWS + typedef __int64 Int; + typedef unsigned __int64 UInt; +#else + typedef long long Int; // NOLINT + typedef unsigned long long UInt; // NOLINT +#endif // GTEST_OS_WINDOWS +}; + +// Integer types of known sizes. +typedef TypeWithSize<4>::Int Int32; +typedef TypeWithSize<4>::UInt UInt32; +typedef TypeWithSize<8>::Int Int64; +typedef TypeWithSize<8>::UInt UInt64; +typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds. + +// Utilities for command line flags and environment variables. + +// Macro for referencing flags. +#define GTEST_FLAG(name) FLAGS_gtest_##name + +// Macros for declaring flags. +#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name) +#define GTEST_DECLARE_int32_(name) \ + GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name) +#define GTEST_DECLARE_string_(name) \ + GTEST_API_ extern ::std::string GTEST_FLAG(name) + +// Macros for defining flags. +#define GTEST_DEFINE_bool_(name, default_val, doc) \ + GTEST_API_ bool GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_int32_(name, default_val, doc) \ + GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_string_(name, default_val, doc) \ + GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val) + +// Thread annotations +#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks) +#define GTEST_LOCK_EXCLUDED_(locks) + +// Parses 'str' for a 32-bit signed integer. 
If successful, writes the result +// to *value and returns true; otherwise leaves *value unchanged and returns +// false. +// TODO(chandlerc): Find a better way to refactor flag and environment parsing +// out of both gtest-port.cc and gtest.cc to avoid exporting this utility +// function. +bool ParseInt32(const Message& src_text, const char* str, Int32* value); + +// Parses a bool/Int32/string from the environment variable +// corresponding to the given Google Test flag. +bool BoolFromGTestEnv(const char* flag, bool default_val); +GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val); +const char* StringFromGTestEnv(const char* flag, const char* default_val); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +#if GTEST_OS_LINUX +# include +# include +# include +# include +#endif // GTEST_OS_LINUX + +#if GTEST_HAS_EXCEPTIONS +# include +#endif + +#include +#include +#include +#include +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the Message class. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ + +#include + + +// Ensures that there is at least one operator<< in the global namespace. +// See Message& operator<<(...) below for why. +void operator<<(const testing::internal::Secret&, int); + +namespace testing { + +// The Message class works like an ostream repeater. +// +// Typical usage: +// +// 1. You stream a bunch of values to a Message object. +// It will remember the text in a stringstream. +// 2. Then you stream the Message object to an ostream. 
+// This causes the text in the Message to be streamed +// to the ostream. +// +// For example; +// +// testing::Message foo; +// foo << 1 << " != " << 2; +// std::cout << foo; +// +// will print "1 != 2". +// +// Message is not intended to be inherited from. In particular, its +// destructor is not virtual. +// +// Note that stringstream behaves differently in gcc and in MSVC. You +// can stream a NULL char pointer to it in the former, but not in the +// latter (it causes an access violation if you do). The Message +// class hides this difference by treating a NULL char pointer as +// "(null)". +class GTEST_API_ Message { + private: + // The type of basic IO manipulators (endl, ends, and flush) for + // narrow streams. + typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); + + public: + // Constructs an empty Message. + Message(); + + // Copy constructor. + Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT + *ss_ << msg.GetString(); + } + + // Constructs a Message from a C-string. + explicit Message(const char* str) : ss_(new ::std::stringstream) { + *ss_ << str; + } + +#if GTEST_OS_SYMBIAN + // Streams a value (either a pointer or not) to this object. + template + inline Message& operator <<(const T& value) { + StreamHelper(typename internal::is_pointer::type(), value); + return *this; + } +#else + // Streams a non-pointer value to this object. + template + inline Message& operator <<(const T& val) { + // Some libraries overload << for STL containers. These + // overloads are defined in the global namespace instead of ::std. + // + // C++'s symbol lookup rule (i.e. Koenig lookup) says that these + // overloads are visible in either the std namespace or the global + // namespace, but not other namespaces, including the testing + // namespace which Google Test's Message class is in. 
+ // + // To allow STL containers (and other types that has a << operator + // defined in the global namespace) to be used in Google Test + // assertions, testing::Message must access the custom << operator + // from the global namespace. With this using declaration, + // overloads of << defined in the global namespace and those + // visible via Koenig lookup are both exposed in this function. + using ::operator <<; + *ss_ << val; + return *this; + } + + // Streams a pointer value to this object. + // + // This function is an overload of the previous one. When you + // stream a pointer to a Message, this definition will be used as it + // is more specialized. (The C++ Standard, section + // [temp.func.order].) If you stream a non-pointer, then the + // previous definition will be used. + // + // The reason for this overload is that streaming a NULL pointer to + // ostream is undefined behavior. Depending on the compiler, you + // may get "0", "(nil)", "(null)", or an access violation. To + // ensure consistent result across compilers, we always treat NULL + // as "(null)". + template + inline Message& operator <<(T* const& pointer) { // NOLINT + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + *ss_ << pointer; + } + return *this; + } +#endif // GTEST_OS_SYMBIAN + + // Since the basic IO manipulators are overloaded for both narrow + // and wide streams, we have to provide this specialized definition + // of operator <<, even though its body is the same as the + // templatized version above. Without this definition, streaming + // endl or other basic IO manipulators to Message will confuse the + // compiler. + Message& operator <<(BasicNarrowIoManip val) { + *ss_ << val; + return *this; + } + + // Instead of 1/0, we want to see true/false for bool values. + Message& operator <<(bool b) { + return *this << (b ? "true" : "false"); + } + + // These two overloads allow streaming a wide C string to a Message + // using the UTF-8 encoding. 
+ Message& operator <<(const wchar_t* wide_c_str); + Message& operator <<(wchar_t* wide_c_str); + +#if GTEST_HAS_STD_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::std::wstring& wstr); +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::wstring& wstr); +#endif // GTEST_HAS_GLOBAL_WSTRING + + // Gets the text streamed to this object so far as an std::string. + // Each '\0' character in the buffer is replaced with "\\0". + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + std::string GetString() const; + + private: + +#if GTEST_OS_SYMBIAN + // These are needed as the Nokia Symbian Compiler cannot decide between + // const T& and const T* in a function template. The Nokia compiler _can_ + // decide between class template specializations for T and T*, so a + // tr1::type_traits-like is_pointer works, and we can overload on that. + template + inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) { + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + *ss_ << pointer; + } + } + template + inline void StreamHelper(internal::false_type /*is_pointer*/, + const T& value) { + // See the comments in Message& operator <<(const T&) above for why + // we need this using statement. + using ::operator <<; + *ss_ << value; + } +#endif // GTEST_OS_SYMBIAN + + // We'll hold the text streamed to this object here. + const internal::scoped_ptr< ::std::stringstream> ss_; + + // We declare (but don't implement) this to prevent the compiler + // from implementing the assignment operator. + void operator=(const Message&); +}; + +// Streams a Message to an ostream. 
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) { + return os << sb.GetString(); +} + +namespace internal { + +// Converts a streamable value to an std::string. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +template +std::string StreamableToString(const T& streamable) { + return (Message() << streamable).GetString(); +} + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares the String class and functions used internally by +// Google Test. They are subject to change without notice. They should not used +// by code external to Google Test. +// +// This header file is #included by . +// It should not be #included by other files. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ + +#ifdef __BORLANDC__ +// string.h is not guaranteed to provide strcpy on C++ Builder. +# include +#endif + +#include +#include + + +namespace testing { +namespace internal { + +// String - an abstract class holding static string utilities. +class GTEST_API_ String { + public: + // Static utility methods + + // Clones a 0-terminated C string, allocating memory using new. The + // caller is responsible for deleting the return value using + // delete[]. Returns the cloned string, or NULL if the input is + // NULL. + // + // This is different from strdup() in string.h, which allocates + // memory using malloc(). + static const char* CloneCString(const char* c_str); + +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be + // able to pass strings to Win32 APIs on CE we need to convert them + // to 'Unicode', UTF-16. 
+ + // Creates a UTF-16 wide string from the given ANSI string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the wide string, or NULL if the + // input is NULL. + // + // The wide string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static LPCWSTR AnsiToUtf16(const char* c_str); + + // Creates an ANSI string from the given wide string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the ANSI string, or NULL if the + // input is NULL. + // + // The returned string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static const char* Utf16ToAnsi(LPCWSTR utf16_str); +#endif + + // Compares two C strings. Returns true iff they have the same content. + // + // Unlike strcmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CStringEquals(const char* lhs, const char* rhs); + + // Converts a wide C string to a String using the UTF-8 encoding. + // NULL will be converted to "(null)". If an error occurred during + // the conversion, "(failed to convert from wide string)" is + // returned. + static std::string ShowWideCString(const wchar_t* wide_c_str); + + // Compares two wide C strings. Returns true iff they have the same + // content. + // + // Unlike wcscmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs); + + // Compares two C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike strcasecmp(), this function can handle NULL argument(s). 
+ // A NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CaseInsensitiveCStringEquals(const char* lhs, + const char* rhs); + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. + static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs); + + // Returns true iff the given string ends with the given suffix, ignoring + // case. Any string is considered to end with an empty suffix. + static bool EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix); + + // Formats an int value as "%02d". + static std::string FormatIntWidth2(int value); // "%02d" for width == 2 + + // Formats an int value as "%X". + static std::string FormatHexInt(int value); + + // Formats a byte as "%02X". + static std::string FormatByte(unsigned char value); + + private: + String(); // Not meant to be instantiated. +}; // class String + +// Gets the content of the stringstream's buffer as an std::string. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ std::string StringStreamToString(::std::stringstream* stream); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +// Copyright 2008, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: keith.ray@gmail.com (Keith Ray) +// +// Google Test filepath utilities +// +// This header file declares classes and functions used internally by +// Google Test. They are subject to change without notice. +// +// This file is #included in . +// Do not include this header file separately! 
+ +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ + + +namespace testing { +namespace internal { + +// FilePath - a class for file and directory pathname manipulation which +// handles platform-specific conventions (like the pathname separator). +// Used for helper functions for naming files in a directory for xml output. +// Except for Set methods, all methods are const or static, which provides an +// "immutable value object" -- useful for peace of mind. +// A FilePath with a value ending in a path separator ("like/this/") represents +// a directory, otherwise it is assumed to represent a file. In either case, +// it may or may not represent an actual file or directory in the file system. +// Names are NOT checked for syntax correctness -- no checking for illegal +// characters, malformed paths, etc. + +class GTEST_API_ FilePath { + public: + FilePath() : pathname_("") { } + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + + explicit FilePath(const std::string& pathname) : pathname_(pathname) { + Normalize(); + } + + FilePath& operator=(const FilePath& rhs) { + Set(rhs); + return *this; + } + + void Set(const FilePath& rhs) { + pathname_ = rhs.pathname_; + } + + const std::string& string() const { return pathname_; } + const char* c_str() const { return pathname_.c_str(); } + + // Returns the current working directory, or "" if unsuccessful. + static FilePath GetCurrentDir(); + + // Given directory = "dir", base_name = "test", number = 0, + // extension = "xml", returns "dir/test.xml". If number is greater + // than zero (e.g., 12), returns "dir/test_12.xml". + // On Windows platform, uses \ as the separator rather than /. + static FilePath MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension); + + // Given directory = "dir", relative_path = "test.xml", + // returns "dir/test.xml". 
+ // On Windows, uses \ as the separator rather than /. + static FilePath ConcatPaths(const FilePath& directory, + const FilePath& relative_path); + + // Returns a pathname for a file that does not currently exist. The pathname + // will be directory/base_name.extension or + // directory/base_name_.extension if directory/base_name.extension + // already exists. The number will be incremented until a pathname is found + // that does not already exist. + // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. + // There could be a race condition if two or more processes are calling this + // function at the same time -- they could both pick the same filename. + static FilePath GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension); + + // Returns true iff the path is "". + bool IsEmpty() const { return pathname_.empty(); } + + // If input name has a trailing separator character, removes it and returns + // the name, otherwise return the name string unmodified. + // On Windows platform, uses \ as the separator, other platforms use /. + FilePath RemoveTrailingPathSeparator() const; + + // Returns a copy of the FilePath with the directory part removed. + // Example: FilePath("path/to/file").RemoveDirectoryName() returns + // FilePath("file"). If there is no directory part ("just_a_file"), it returns + // the FilePath unmodified. If there is no file part ("just_a_dir/") it + // returns an empty FilePath (""). + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveDirectoryName() const; + + // RemoveFileName returns the directory path with the filename removed. + // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". + // If the FilePath is "a_file" or "/a_file", RemoveFileName returns + // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does + // not have a file, like "just/a/dir/", it returns the FilePath unmodified. 
+ // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveFileName() const; + + // Returns a copy of the FilePath with the case-insensitive extension removed. + // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns + // FilePath("dir/file"). If a case-insensitive extension is not + // found, returns a copy of the original FilePath. + FilePath RemoveExtension(const char* extension) const; + + // Creates directories so that path exists. Returns true if successful or if + // the directories already exist; returns false if unable to create + // directories for any reason. Will also return false if the FilePath does + // not represent a directory (that is, it doesn't end with a path separator). + bool CreateDirectoriesRecursively() const; + + // Create the directory so that path exists. Returns true if successful or + // if the directory already exists; returns false if unable to create the + // directory for any reason, including if the parent directory does not + // exist. Not named "CreateDirectory" because that's a macro on Windows. + bool CreateFolder() const; + + // Returns true if FilePath describes something in the file-system, + // either a file, directory, or whatever, and that something exists. + bool FileOrDirectoryExists() const; + + // Returns true if pathname describes a directory in the file-system + // that exists. + bool DirectoryExists() const; + + // Returns true if FilePath ends with a path separator, which indicates that + // it is intended to represent a directory. Returns false otherwise. + // This does NOT check that a directory (or file) actually exists. + bool IsDirectory() const; + + // Returns true if pathname describes a root directory. (Windows has one + // root directory per disk drive.) + bool IsRootDirectory() const; + + // Returns true if pathname describes an absolute path. + bool IsAbsolutePath() const; + + private: + // Replaces multiple consecutive separators with a single separator. 
+ // For example, "bar///foo" becomes "bar/foo". Does not eliminate other + // redundancies that might be in a pathname involving "." or "..". + // + // A pathname with multiple consecutive separators may occur either through + // user error or as a result of some scripts or APIs that generate a pathname + // with a trailing separator. On other platforms the same API or script + // may NOT generate a pathname with a trailing "/". Then elsewhere that + // pathname may have another "/" and pathname components added to it, + // without checking for the separator already being there. + // The script language and operating system may allow paths like "foo//bar" + // but some of the functions in FilePath will not handle that correctly. In + // particular, RemoveTrailingPathSeparator() only removes one separator, and + // it is called in CreateDirectoriesRecursively() assuming that it will change + // a pathname from directory syntax (trailing separator) to filename syntax. + // + // On Windows this method also replaces the alternate path separator '/' with + // the primary path separator '\\', so that for example "bar\\/\\foo" becomes + // "bar\\foo". + + void Normalize(); + + // Returns a pointer to the last occurence of a valid path separator in + // the FilePath. On Windows, for example, both '/' and '\' are valid path + // separators. Returns NULL if no path separator was found. + const char* FindLastPathSeparator() const; + + std::string pathname_; +}; // class FilePath + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +// This file was GENERATED by command: +// pump.py gtest-type-util.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Type utilities needed for implementing typed and type-parameterized +// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// Currently we support at most 50 types in a list, and at most 50 +// type-parameterized tests in one type-parameterized test case. +// Please contact googletestframework@googlegroups.com if you need +// more. 
+ +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + + +// #ifdef __GNUC__ is too general here. It is possible to use gcc without using +// libstdc++ (which is where cxxabi.h comes from). +# if GTEST_HAS_CXXABI_H_ +# include +# elif defined(__HP_aCC) +# include +# endif // GTEST_HASH_CXXABI_H_ + +namespace testing { +namespace internal { + +// GetTypeName() returns a human-readable name of type T. +// NB: This function is also used in Google Mock, so don't move it inside of +// the typed-test-only section below. +template +std::string GetTypeName() { +# if GTEST_HAS_RTTI + + const char* const name = typeid(T).name(); +# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC) + int status = 0; + // gcc's implementation of typeid(T).name() mangles the type name, + // so we have to demangle it. +# if GTEST_HAS_CXXABI_H_ + using abi::__cxa_demangle; +# endif // GTEST_HAS_CXXABI_H_ + char* const readable_name = __cxa_demangle(name, 0, 0, &status); + const std::string name_str(status == 0 ? readable_name : name); + free(readable_name); + return name_str; +# else + return name; +# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC + +# else + + return ""; + +# endif // GTEST_HAS_RTTI +} + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// AssertyTypeEq::type is defined iff T1 and T2 are the same +// type. This can be used as a compile-time assertion to ensure that +// two types are equal. + +template +struct AssertTypeEq; + +template +struct AssertTypeEq { + typedef bool type; +}; + +// A unique type used as the default value for the arguments of class +// template Types. This allows us to simulate variadic templates +// (e.g. Types, Type, and etc), which C++ doesn't +// support directly. +struct None {}; + +// The following family of struct and struct templates are used to +// represent type lists. In particular, TypesN +// represents a type list with N types (T1, T2, ..., and TN) in it. 
+// Except for Types0, every struct in the family has two member types: +// Head for the first type in the list, and Tail for the rest of the +// list. + +// The empty type list. +struct Types0 {}; + +// Type lists of length 1, 2, 3, and so on. + +template +struct Types1 { + typedef T1 Head; + typedef Types0 Tail; +}; +template +struct Types2 { + typedef T1 Head; + typedef Types1 Tail; +}; + +template +struct Types3 { + typedef T1 Head; + typedef Types2 Tail; +}; + +template +struct Types4 { + typedef T1 Head; + typedef Types3 Tail; +}; + +template +struct Types5 { + typedef T1 Head; + typedef Types4 Tail; +}; + +template +struct Types6 { + typedef T1 Head; + typedef Types5 Tail; +}; + +template +struct Types7 { + typedef T1 Head; + typedef Types6 Tail; +}; + +template +struct Types8 { + typedef T1 Head; + typedef Types7 Tail; +}; + +template +struct Types9 { + typedef T1 Head; + typedef Types8 Tail; +}; + +template +struct Types10 { + typedef T1 Head; + typedef Types9 Tail; +}; + +template +struct Types11 { + typedef T1 Head; + typedef Types10 Tail; +}; + +template +struct Types12 { + typedef T1 Head; + typedef Types11 Tail; +}; + +template +struct Types13 { + typedef T1 Head; + typedef Types12 Tail; +}; + +template +struct Types14 { + typedef T1 Head; + typedef Types13 Tail; +}; + +template +struct Types15 { + typedef T1 Head; + typedef Types14 Tail; +}; + +template +struct Types16 { + typedef T1 Head; + typedef Types15 Tail; +}; + +template +struct Types17 { + typedef T1 Head; + typedef Types16 Tail; +}; + +template +struct Types18 { + typedef T1 Head; + typedef Types17 Tail; +}; + +template +struct Types19 { + typedef T1 Head; + typedef Types18 Tail; +}; + +template +struct Types20 { + typedef T1 Head; + typedef Types19 Tail; +}; + +template +struct Types21 { + typedef T1 Head; + typedef Types20 Tail; +}; + +template +struct Types22 { + typedef T1 Head; + typedef Types21 Tail; +}; + +template +struct Types23 { + typedef T1 Head; + typedef Types22 Tail; +}; + 
+template +struct Types24 { + typedef T1 Head; + typedef Types23 Tail; +}; + +template +struct Types25 { + typedef T1 Head; + typedef Types24 Tail; +}; + +template +struct Types26 { + typedef T1 Head; + typedef Types25 Tail; +}; + +template +struct Types27 { + typedef T1 Head; + typedef Types26 Tail; +}; + +template +struct Types28 { + typedef T1 Head; + typedef Types27 Tail; +}; + +template +struct Types29 { + typedef T1 Head; + typedef Types28 Tail; +}; + +template +struct Types30 { + typedef T1 Head; + typedef Types29 Tail; +}; + +template +struct Types31 { + typedef T1 Head; + typedef Types30 Tail; +}; + +template +struct Types32 { + typedef T1 Head; + typedef Types31 Tail; +}; + +template +struct Types33 { + typedef T1 Head; + typedef Types32 Tail; +}; + +template +struct Types34 { + typedef T1 Head; + typedef Types33 Tail; +}; + +template +struct Types35 { + typedef T1 Head; + typedef Types34 Tail; +}; + +template +struct Types36 { + typedef T1 Head; + typedef Types35 Tail; +}; + +template +struct Types37 { + typedef T1 Head; + typedef Types36 Tail; +}; + +template +struct Types38 { + typedef T1 Head; + typedef Types37 Tail; +}; + +template +struct Types39 { + typedef T1 Head; + typedef Types38 Tail; +}; + +template +struct Types40 { + typedef T1 Head; + typedef Types39 Tail; +}; + +template +struct Types41 { + typedef T1 Head; + typedef Types40 Tail; +}; + +template +struct Types42 { + typedef T1 Head; + typedef Types41 Tail; +}; + +template +struct Types43 { + typedef T1 Head; + typedef Types42 Tail; +}; + +template +struct Types44 { + typedef T1 Head; + typedef Types43 Tail; +}; + +template +struct Types45 { + typedef T1 Head; + typedef Types44 Tail; +}; + +template +struct Types46 { + typedef T1 Head; + typedef Types45 Tail; +}; + +template +struct Types47 { + typedef T1 Head; + typedef Types46 Tail; +}; + +template +struct Types48 { + typedef T1 Head; + typedef Types47 Tail; +}; + +template +struct Types49 { + typedef T1 Head; + typedef Types48 Tail; +}; 
+ +template +struct Types50 { + typedef T1 Head; + typedef Types49 Tail; +}; + + +} // namespace internal + +// We don't want to require the users to write TypesN<...> directly, +// as that would require them to count the length. Types<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Types +// will appear as Types in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Types, and Google Test will translate +// that to TypesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Types template. +template +struct Types { + typedef internal::Types50 type; +}; + +template <> +struct Types { + typedef internal::Types0 type; +}; +template +struct Types { + typedef internal::Types1 type; +}; +template +struct Types { + typedef internal::Types2 type; +}; +template +struct Types { + typedef internal::Types3 type; +}; +template +struct Types { + typedef internal::Types4 type; +}; +template +struct Types { + typedef internal::Types5 type; +}; +template +struct Types { + typedef internal::Types6 type; +}; +template +struct Types { + typedef internal::Types7 type; +}; +template +struct Types { + typedef internal::Types8 type; +}; +template +struct Types { + typedef internal::Types9 type; +}; +template +struct Types { + typedef internal::Types10 type; +}; +template +struct Types { + typedef internal::Types11 type; +}; +template +struct Types { + typedef internal::Types12 type; +}; +template +struct Types { + typedef internal::Types13 type; +}; +template +struct Types { + typedef internal::Types14 type; +}; +template +struct Types { + typedef internal::Types15 type; +}; +template +struct Types { + typedef internal::Types16 type; +}; +template +struct Types { + typedef internal::Types17 type; +}; +template 
+struct Types { + typedef internal::Types18 type; +}; +template +struct Types { + typedef internal::Types19 type; +}; +template +struct Types { + typedef internal::Types20 type; +}; +template +struct Types { + typedef internal::Types21 type; +}; +template +struct Types { + typedef internal::Types22 type; +}; +template +struct Types { + typedef internal::Types23 type; +}; +template +struct Types { + typedef internal::Types24 type; +}; +template +struct Types { + typedef internal::Types25 type; +}; +template +struct Types { + typedef internal::Types26 type; +}; +template +struct Types { + typedef internal::Types27 type; +}; +template +struct Types { + typedef internal::Types28 type; +}; +template +struct Types { + typedef internal::Types29 type; +}; +template +struct Types { + typedef internal::Types30 type; +}; +template +struct Types { + typedef internal::Types31 type; +}; +template +struct Types { + typedef internal::Types32 type; +}; +template +struct Types { + typedef internal::Types33 type; +}; +template +struct Types { + typedef internal::Types34 type; +}; +template +struct Types { + typedef internal::Types35 type; +}; +template +struct Types { + typedef internal::Types36 type; +}; +template +struct Types { + typedef internal::Types37 type; +}; +template +struct Types { + typedef internal::Types38 type; +}; +template +struct Types { + typedef internal::Types39 type; +}; +template +struct Types { + typedef internal::Types40 type; +}; +template +struct Types { + typedef internal::Types41 type; +}; +template +struct Types { + typedef internal::Types42 type; +}; +template +struct Types { + typedef internal::Types43 type; +}; +template +struct Types { + typedef internal::Types44 type; +}; +template +struct Types { + typedef internal::Types45 type; +}; +template +struct Types { + typedef internal::Types46 type; +}; +template +struct Types { + typedef internal::Types47 type; +}; +template +struct Types { + typedef internal::Types48 type; +}; +template +struct Types { 
+ typedef internal::Types49 type; +}; + +namespace internal { + +# define GTEST_TEMPLATE_ template class + +// The template "selector" struct TemplateSel is used to +// represent Tmpl, which must be a class template with one type +// parameter, as a type. TemplateSel::Bind::type is defined +// as the type Tmpl. This allows us to actually instantiate the +// template "selected" by TemplateSel. +// +// This trick is necessary for simulating typedef for class templates, +// which C++ doesn't support directly. +template +struct TemplateSel { + template + struct Bind { + typedef Tmpl type; + }; +}; + +# define GTEST_BIND_(TmplSel, T) \ + TmplSel::template Bind::type + +// A unique struct template used as the default value for the +// arguments of class template Templates. This allows us to simulate +// variadic templates (e.g. Templates, Templates, +// and etc), which C++ doesn't support directly. +template +struct NoneT {}; + +// The following family of struct and struct templates are used to +// represent template lists. In particular, TemplatesN represents a list of N templates (T1, T2, ..., and TN). Except +// for Templates0, every struct in the family has two member types: +// Head for the selector of the first template in the list, and Tail +// for the rest of the list. + +// The empty template list. +struct Templates0 {}; + +// Template lists of length 1, 2, 3, and so on. 
+ +template +struct Templates1 { + typedef TemplateSel Head; + typedef Templates0 Tail; +}; +template +struct Templates2 { + typedef TemplateSel Head; + typedef Templates1 Tail; +}; + +template +struct Templates3 { + typedef TemplateSel Head; + typedef Templates2 Tail; +}; + +template +struct Templates4 { + typedef TemplateSel Head; + typedef Templates3 Tail; +}; + +template +struct Templates5 { + typedef TemplateSel Head; + typedef Templates4 Tail; +}; + +template +struct Templates6 { + typedef TemplateSel Head; + typedef Templates5 Tail; +}; + +template +struct Templates7 { + typedef TemplateSel Head; + typedef Templates6 Tail; +}; + +template +struct Templates8 { + typedef TemplateSel Head; + typedef Templates7 Tail; +}; + +template +struct Templates9 { + typedef TemplateSel Head; + typedef Templates8 Tail; +}; + +template +struct Templates10 { + typedef TemplateSel Head; + typedef Templates9 Tail; +}; + +template +struct Templates11 { + typedef TemplateSel Head; + typedef Templates10 Tail; +}; + +template +struct Templates12 { + typedef TemplateSel Head; + typedef Templates11 Tail; +}; + +template +struct Templates13 { + typedef TemplateSel Head; + typedef Templates12 Tail; +}; + +template +struct Templates14 { + typedef TemplateSel Head; + typedef Templates13 Tail; +}; + +template +struct Templates15 { + typedef TemplateSel Head; + typedef Templates14 Tail; +}; + +template +struct Templates16 { + typedef TemplateSel Head; + typedef Templates15 Tail; +}; + +template +struct Templates17 { + typedef TemplateSel Head; + typedef Templates16 Tail; +}; + +template +struct Templates18 { + typedef TemplateSel Head; + typedef Templates17 Tail; +}; + +template +struct Templates19 { + typedef TemplateSel Head; + typedef Templates18 Tail; +}; + +template +struct Templates20 { + typedef TemplateSel Head; + typedef Templates19 Tail; +}; + +template +struct Templates21 { + typedef TemplateSel Head; + typedef Templates20 Tail; +}; + +template +struct Templates22 { + typedef 
TemplateSel Head; + typedef Templates21 Tail; +}; + +template +struct Templates23 { + typedef TemplateSel Head; + typedef Templates22 Tail; +}; + +template +struct Templates24 { + typedef TemplateSel Head; + typedef Templates23 Tail; +}; + +template +struct Templates25 { + typedef TemplateSel Head; + typedef Templates24 Tail; +}; + +template +struct Templates26 { + typedef TemplateSel Head; + typedef Templates25 Tail; +}; + +template +struct Templates27 { + typedef TemplateSel Head; + typedef Templates26 Tail; +}; + +template +struct Templates28 { + typedef TemplateSel Head; + typedef Templates27 Tail; +}; + +template +struct Templates29 { + typedef TemplateSel Head; + typedef Templates28 Tail; +}; + +template +struct Templates30 { + typedef TemplateSel Head; + typedef Templates29 Tail; +}; + +template +struct Templates31 { + typedef TemplateSel Head; + typedef Templates30 Tail; +}; + +template +struct Templates32 { + typedef TemplateSel Head; + typedef Templates31 Tail; +}; + +template +struct Templates33 { + typedef TemplateSel Head; + typedef Templates32 Tail; +}; + +template +struct Templates34 { + typedef TemplateSel Head; + typedef Templates33 Tail; +}; + +template +struct Templates35 { + typedef TemplateSel Head; + typedef Templates34 Tail; +}; + +template +struct Templates36 { + typedef TemplateSel Head; + typedef Templates35 Tail; +}; + +template +struct Templates37 { + typedef TemplateSel Head; + typedef Templates36 Tail; +}; + +template +struct Templates38 { + typedef TemplateSel Head; + typedef Templates37 Tail; +}; + +template +struct Templates39 { + typedef TemplateSel Head; + typedef Templates38 Tail; +}; + +template +struct Templates40 { + typedef TemplateSel Head; + typedef Templates39 Tail; +}; + +template +struct Templates41 { + typedef TemplateSel Head; + typedef Templates40 Tail; +}; + +template +struct Templates42 { + typedef TemplateSel Head; + typedef Templates41 Tail; +}; + +template +struct Templates43 { + typedef TemplateSel Head; + 
typedef Templates42 Tail; +}; + +template +struct Templates44 { + typedef TemplateSel Head; + typedef Templates43 Tail; +}; + +template +struct Templates45 { + typedef TemplateSel Head; + typedef Templates44 Tail; +}; + +template +struct Templates46 { + typedef TemplateSel Head; + typedef Templates45 Tail; +}; + +template +struct Templates47 { + typedef TemplateSel Head; + typedef Templates46 Tail; +}; + +template +struct Templates48 { + typedef TemplateSel Head; + typedef Templates47 Tail; +}; + +template +struct Templates49 { + typedef TemplateSel Head; + typedef Templates48 Tail; +}; + +template +struct Templates50 { + typedef TemplateSel Head; + typedef Templates49 Tail; +}; + + +// We don't want to require the users to write TemplatesN<...> directly, +// as that would require them to count the length. Templates<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Templates +// will appear as Templates in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Templates, and Google Test will translate +// that to TemplatesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Templates template. 
+template +struct Templates { + typedef Templates50 type; +}; + +template <> +struct Templates { + typedef Templates0 type; +}; +template +struct Templates { + typedef Templates1 type; +}; +template +struct Templates { + typedef Templates2 type; +}; +template +struct Templates { + typedef Templates3 type; +}; +template +struct Templates { + typedef Templates4 type; +}; +template +struct Templates { + typedef Templates5 type; +}; +template +struct Templates { + typedef Templates6 type; +}; +template +struct Templates { + typedef Templates7 type; +}; +template +struct Templates { + typedef Templates8 type; +}; +template +struct Templates { + typedef Templates9 type; +}; +template +struct Templates { + typedef Templates10 type; +}; +template +struct Templates { + typedef Templates11 type; +}; +template +struct Templates { + typedef Templates12 type; +}; +template +struct Templates { + typedef Templates13 type; +}; +template +struct Templates { + typedef Templates14 type; +}; +template +struct Templates { + typedef Templates15 type; +}; +template +struct Templates { + typedef Templates16 type; +}; +template +struct Templates { + typedef Templates17 type; +}; +template +struct Templates { + typedef Templates18 type; +}; +template +struct Templates { + typedef Templates19 type; +}; +template +struct Templates { + typedef Templates20 type; +}; +template +struct Templates { + typedef Templates21 type; +}; +template +struct Templates { + typedef Templates22 type; +}; +template +struct Templates { + typedef Templates23 type; +}; +template +struct Templates { + typedef Templates24 type; +}; +template +struct Templates { + typedef Templates25 type; +}; +template +struct Templates { + typedef Templates26 type; +}; +template +struct Templates { + typedef Templates27 type; +}; +template +struct Templates { + typedef Templates28 type; +}; +template +struct Templates { + typedef Templates29 type; +}; +template +struct Templates { + typedef Templates30 type; +}; +template +struct 
Templates { + typedef Templates31 type; +}; +template +struct Templates { + typedef Templates32 type; +}; +template +struct Templates { + typedef Templates33 type; +}; +template +struct Templates { + typedef Templates34 type; +}; +template +struct Templates { + typedef Templates35 type; +}; +template +struct Templates { + typedef Templates36 type; +}; +template +struct Templates { + typedef Templates37 type; +}; +template +struct Templates { + typedef Templates38 type; +}; +template +struct Templates { + typedef Templates39 type; +}; +template +struct Templates { + typedef Templates40 type; +}; +template +struct Templates { + typedef Templates41 type; +}; +template +struct Templates { + typedef Templates42 type; +}; +template +struct Templates { + typedef Templates43 type; +}; +template +struct Templates { + typedef Templates44 type; +}; +template +struct Templates { + typedef Templates45 type; +}; +template +struct Templates { + typedef Templates46 type; +}; +template +struct Templates { + typedef Templates47 type; +}; +template +struct Templates { + typedef Templates48 type; +}; +template +struct Templates { + typedef Templates49 type; +}; + +// The TypeList template makes it possible to use either a single type +// or a Types<...> list in TYPED_TEST_CASE() and +// INSTANTIATE_TYPED_TEST_CASE_P(). + +template +struct TypeList { + typedef Types1 type; +}; + +template +struct TypeList > { + typedef typename Types::type type; +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + +// Due to C++ preprocessor weirdness, we need double indirection to +// concatenate two tokens when one of them is __LINE__. Writing +// +// foo ## __LINE__ +// +// will result in the token foo__LINE__, instead of foo followed by +// the current line number. 
For more details, see +// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +class ProtocolMessage; +namespace proto2 { class Message; } + +namespace testing { + +// Forward declarations. + +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test cases. + +template +::std::string PrintToString(const T& value); + +namespace internal { + +struct TraceInfo; // Information about a trace point. +class ScopedTrace; // Implements scoped trace. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest + +// How many times InitGoogleTest() has been called. +GTEST_API_ extern int g_init_gtest_count; + +// The text used in failure messages to indicate the start of the +// stack trace. +GTEST_API_ extern const char kStackTraceMarker[]; + +// Two overloaded helpers for checking at compile time whether an +// expression is a null pointer literal (i.e. NULL or any 0-valued +// compile-time integral constant). Their return values have +// different sizes, so we can use sizeof() to test which version is +// picked by the compiler. These helpers have no implementations, as +// we only need their signatures. +// +// Given IsNullLiteralHelper(x), the compiler will pick the first +// version if x can be implicitly converted to Secret*, and pick the +// second version otherwise. Since Secret is a secret and incomplete +// type, the only expression a user can write that has type Secret* is +// a null pointer literal. Therefore, we know that x is a null +// pointer literal if and only if the first version is picked by the +// compiler. 
+char IsNullLiteralHelper(Secret* p); +char (&IsNullLiteralHelper(...))[2]; // NOLINT + +// A compile-time bool constant that is true if and only if x is a +// null pointer literal (i.e. NULL or any 0-valued compile-time +// integral constant). +#ifdef GTEST_ELLIPSIS_NEEDS_POD_ +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_IS_NULL_LITERAL_(x) false +#else +# define GTEST_IS_NULL_LITERAL_(x) \ + (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1) +#endif // GTEST_ELLIPSIS_NEEDS_POD_ + +// Appends the user-supplied message to the Google-Test-generated message. +GTEST_API_ std::string AppendUserMessage( + const std::string& gtest_msg, const Message& user_msg); + +#if GTEST_HAS_EXCEPTIONS + +// This exception is thrown by (and only by) a failed Google Test +// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions +// are enabled). We derive it from std::runtime_error, which is for +// errors presumably detectable only at run time. Since +// std::runtime_error inherits from std::exception, many testing +// frameworks know how to extract and print the message inside it. +class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error { + public: + explicit GoogleTestFailureException(const TestPartResult& failure); +}; + +#endif // GTEST_HAS_EXCEPTIONS + +// A helper class for creating scoped traces in user programs. +class GTEST_API_ ScopedTrace { + public: + // The c'tor pushes the given source file location and message onto + // a trace stack maintained by Google Test. + ScopedTrace(const char* file, int line, const Message& message); + + // The d'tor pops the info pushed by the c'tor. + // + // Note that the d'tor is not virtual in order to be efficient. + // Don't inherit from ScopedTrace! 
+ ~ScopedTrace(); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); +} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its + // c'tor and d'tor. Therefore it doesn't + // need to be used otherwise. + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +GTEST_API_ AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case); + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +GTEST_API_ std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value); + +// This template class represents an IEEE floating-point number +// (either single-precision or double-precision, depending on the +// template parameters). +// +// The purpose of this class is to do more sophisticated number +// comparison. (Due to round-off error, etc, it's very unlikely that +// two floating-points will be equal exactly. Hence a naive +// comparison by the == operation often doesn't work.) +// +// Format of IEEE floating-point: +// +// The most-significant bit being the leftmost, an IEEE +// floating-point looks like +// +// sign_bit exponent_bits fraction_bits +// +// Here, sign_bit is a single bit that designates the sign of the +// number. 
+// +// For float, there are 8 exponent bits and 23 fraction bits. +// +// For double, there are 11 exponent bits and 52 fraction bits. +// +// More details can be found at +// http://en.wikipedia.org/wiki/IEEE_floating-point_standard. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +template +class FloatingPoint { + public: + // Defines the unsigned integer type that has the same size as the + // floating point number. + typedef typename TypeWithSize::UInt Bits; + + // Constants. + + // # of bits in a number. + static const size_t kBitCount = 8*sizeof(RawType); + + // # of fraction bits in a number. + static const size_t kFractionBitCount = + std::numeric_limits::digits - 1; + + // # of exponent bits in a number. + static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount; + + // The mask for the sign bit. + static const Bits kSignBitMask = static_cast(1) << (kBitCount - 1); + + // The mask for the fraction bits. + static const Bits kFractionBitMask = + ~static_cast(0) >> (kExponentBitCount + 1); + + // The mask for the exponent bits. + static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask); + + // How many ULP's (Units in the Last Place) we want to tolerate when + // comparing two numbers. The larger the value, the more error we + // allow. A 0 value means that two numbers must be exactly the same + // to be considered equal. + // + // The maximum error of a single floating-point operation is 0.5 + // units in the last place. On Intel CPU's, all floating-point + // calculations are done with 80-bit precision, while double has 64 + // bits. Therefore, 4 should be enough for ordinary use. + // + // See the following article for more details on ULP: + // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + static const size_t kMaxUlps = 4; + + // Constructs a FloatingPoint from a raw floating-point number. 
+ // + // On an Intel CPU, passing a non-normalized NAN (Not a Number) + // around may change its bits, although the new value is guaranteed + // to be also a NAN. Therefore, don't expect this constructor to + // preserve the bits in x when x is a NAN. + explicit FloatingPoint(const RawType& x) { u_.value_ = x; } + + // Static methods + + // Reinterprets a bit pattern as a floating-point number. + // + // This function is needed to test the AlmostEquals() method. + static RawType ReinterpretBits(const Bits bits) { + FloatingPoint fp(0); + fp.u_.bits_ = bits; + return fp.u_.value_; + } + + // Returns the floating-point number that represent positive infinity. + static RawType Infinity() { + return ReinterpretBits(kExponentBitMask); + } + + // Returns the maximum representable finite floating-point number. + static RawType Max(); + + // Non-static methods + + // Returns the bits that represents this number. + const Bits &bits() const { return u_.bits_; } + + // Returns the exponent bits of this number. + Bits exponent_bits() const { return kExponentBitMask & u_.bits_; } + + // Returns the fraction bits of this number. + Bits fraction_bits() const { return kFractionBitMask & u_.bits_; } + + // Returns the sign bit of this number. + Bits sign_bit() const { return kSignBitMask & u_.bits_; } + + // Returns true iff this is NAN (not a number). + bool is_nan() const { + // It's a NAN if the exponent bits are all ones and the fraction + // bits are not entirely zeros. + return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0); + } + + // Returns true iff this number is at most kMaxUlps ULP's away from + // rhs. In particular, this function: + // + // - returns false if either number is (or both are) NAN. + // - treats really large numbers as almost equal to infinity. + // - thinks +0.0 and -0.0 are 0 DLP's apart. 
+ bool AlmostEquals(const FloatingPoint& rhs) const { + // The IEEE standard says that any comparison operation involving + // a NAN must return false. + if (is_nan() || rhs.is_nan()) return false; + + return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) + <= kMaxUlps; + } + + private: + // The data type used to store the actual floating-point number. + union FloatingPointUnion { + RawType value_; // The raw floating-point number. + Bits bits_; // The bits that represent the number. + }; + + // Converts an integer from the sign-and-magnitude representation to + // the biased representation. More precisely, let N be 2 to the + // power of (kBitCount - 1), an integer x is represented by the + // unsigned number x + N. + // + // For instance, + // + // -N + 1 (the most negative number representable using + // sign-and-magnitude) is represented by 1; + // 0 is represented by N; and + // N - 1 (the biggest number representable using + // sign-and-magnitude) is represented by 2N - 1. + // + // Read http://en.wikipedia.org/wiki/Signed_number_representations + // for more details on signed number representations. + static Bits SignAndMagnitudeToBiased(const Bits &sam) { + if (kSignBitMask & sam) { + // sam represents a negative number. + return ~sam + 1; + } else { + // sam represents a positive number. + return kSignBitMask | sam; + } + } + + // Given two numbers in the sign-and-magnitude representation, + // returns the distance between them as an unsigned number. + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, + const Bits &sam2) { + const Bits biased1 = SignAndMagnitudeToBiased(sam1); + const Bits biased2 = SignAndMagnitudeToBiased(sam2); + return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); + } + + FloatingPointUnion u_; +}; + +// We cannot use std::numeric_limits::max() as it clashes with the max() +// macro defined by . 
+template <> +inline float FloatingPoint::Max() { return FLT_MAX; } +template <> +inline double FloatingPoint::Max() { return DBL_MAX; } + +// Typedefs the instances of the FloatingPoint template class that we +// care to use. +typedef FloatingPoint Float; +typedef FloatingPoint Double; + +// In order to catch the mistake of putting tests that use different +// test fixture classes in the same test case, we need to assign +// unique IDs to fixture classes and compare them. The TypeId type is +// used to hold such IDs. The user should treat TypeId as an opaque +// type: the only operation allowed on TypeId values is to compare +// them for equality using the == operator. +typedef const void* TypeId; + +template +class TypeIdHelper { + public: + // dummy_ must not have a const type. Otherwise an overly eager + // compiler (e.g. MSVC 7.1 & 8.0) may try to merge + // TypeIdHelper::dummy_ for different Ts as an "optimization". + static bool dummy_; +}; + +template +bool TypeIdHelper::dummy_ = false; + +// GetTypeId() returns the ID of type T. Different values will be +// returned for different types. Calling the function twice with the +// same type argument is guaranteed to return the same ID. +template +TypeId GetTypeId() { + // The compiler is required to allocate a different + // TypeIdHelper::dummy_ variable for each T used to instantiate + // the template. Therefore, the address of dummy_ is guaranteed to + // be unique. + return &(TypeIdHelper::dummy_); +} + +// Returns the type ID of ::testing::Test. Always call this instead +// of GetTypeId< ::testing::Test>() to get the type ID of +// ::testing::Test, as the latter may give the wrong result due to a +// suspected linker bug when compiling Google Test as a Mac OS X +// framework. +GTEST_API_ TypeId GetTestTypeId(); + +// Defines the abstract factory interface that creates instances +// of a Test object. +class TestFactoryBase { + public: + virtual ~TestFactoryBase() {} + + // Creates a test instance to run. 
The instance is both created and destroyed + // within TestInfoImpl::Run() + virtual Test* CreateTest() = 0; + + protected: + TestFactoryBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase); +}; + +// This class provides implementation of TeastFactoryBase interface. +// It is used in TEST and TEST_F macros. +template +class TestFactoryImpl : public TestFactoryBase { + public: + virtual Test* CreateTest() { return new TestClass; } +}; + +#if GTEST_OS_WINDOWS + +// Predicate-formatters for implementing the HRESULT checking macros +// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED} +// We pass a long instead of HRESULT to avoid causing an +// include dependency for the HRESULT type. +GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr, + long hr); // NOLINT +GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr, + long hr); // NOLINT + +#endif // GTEST_OS_WINDOWS + +// Types of SetUpTestCase() and TearDownTestCase() functions. +typedef void (*SetUpTestCaseFunc)(); +typedef void (*TearDownTestCaseFunc)(); + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param text representation of the test's value parameter, +// or NULL if this is not a type-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. 
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, + const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory); + +// If *pstr starts with the given prefix, modifies *pstr to be right +// past the prefix and returns true; otherwise leaves *pstr unchanged +// and returns false. None of pstr, *pstr, and prefix can be NULL. +GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr); + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// State of the definition of a type-parameterized test case. +class GTEST_API_ TypedTestCasePState { + public: + TypedTestCasePState() : registered_(false) {} + + // Adds the given test name to defined_test_names_ and return true + // if the test case hasn't been registered; otherwise aborts the + // program. + bool AddTestName(const char* file, int line, const char* case_name, + const char* test_name) { + if (registered_) { + fprintf(stderr, "%s Test %s must be defined before " + "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n", + FormatFileLocation(file, line).c_str(), test_name, case_name); + fflush(stderr); + posix::Abort(); + } + defined_test_names_.insert(test_name); + return true; + } + + // Verifies that registered_tests match the test names in + // defined_test_names_; returns registered_tests if successful, or + // aborts the program otherwise. + const char* VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests); + + private: + bool registered_; + ::std::set defined_test_names_; +}; + +// Skips to the first non-space char after the first comma in 'str'; +// returns NULL if no comma is found in 'str'. 
+inline const char* SkipComma(const char* str) { + const char* comma = strchr(str, ','); + if (comma == NULL) { + return NULL; + } + while (IsSpace(*(++comma))) {} + return comma; +} + +// Returns the prefix of 'str' before the first comma in it; returns +// the entire string if it contains no comma. +inline std::string GetPrefixUntilComma(const char* str) { + const char* comma = strchr(str, ','); + return comma == NULL ? str : std::string(str, comma); +} + +// TypeParameterizedTest::Register() +// registers a list of type-parameterized tests with Google Test. The +// return value is insignificant - we just need to return something +// such that we can call this function in a namespace scope. +// +// Implementation note: The GTEST_TEMPLATE_ macro declares a template +// template parameter. It's defined in gtest-type-util.h. +template +class TypeParameterizedTest { + public: + // 'index' is the index of the test in the type list 'Types' + // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase, + // Types). Valid values for 'index' are [0, N - 1] where N is the + // length of Types. + static bool Register(const char* prefix, const char* case_name, + const char* test_names, int index) { + typedef typename Types::Head Type; + typedef Fixture FixtureClass; + typedef typename GTEST_BIND_(TestSel, Type) TestClass; + + // First, registers the first type-parameterized test in the type + // list. + MakeAndRegisterTestInfo( + (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/" + + StreamableToString(index)).c_str(), + GetPrefixUntilComma(test_names).c_str(), + GetTypeName().c_str(), + NULL, // No value parameter. + GetTypeId(), + TestClass::SetUpTestCase, + TestClass::TearDownTestCase, + new TestFactoryImpl); + + // Next, recurses (at compile time) with the tail of the type list. + return TypeParameterizedTest + ::Register(prefix, case_name, test_names, index + 1); + } +}; + +// The base case for the compile time recursion. 
+template +class TypeParameterizedTest { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/, int /*index*/) { + return true; + } +}; + +// TypeParameterizedTestCase::Register() +// registers *all combinations* of 'Tests' and 'Types' with Google +// Test. The return value is insignificant - we just need to return +// something such that we can call this function in a namespace scope. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* prefix, const char* case_name, + const char* test_names) { + typedef typename Tests::Head Head; + + // First, register the first test in 'Test' for each type in 'Types'. + TypeParameterizedTest::Register( + prefix, case_name, test_names, 0); + + // Next, recurses (at compile time) with the tail of the test list. + return TypeParameterizedTestCase + ::Register(prefix, case_name, SkipComma(test_names)); + } +}; + +// The base case for the compile time recursion. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/) { + return true; + } +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +GTEST_API_ std::string GetCurrentOsStackTraceExceptTop( + UnitTest* unit_test, int skip_count); + +// Helpers for suppressing warnings on unreachable code or constant +// condition. + +// Always returns true. 
+GTEST_API_ bool AlwaysTrue(); + +// Always returns false. +inline bool AlwaysFalse() { return !AlwaysTrue(); } + +// Helper for suppressing false warning from Clang on a const char* +// variable declared in a conditional expression always being NULL in +// the else branch. +struct GTEST_API_ ConstCharPtr { + ConstCharPtr(const char* str) : value(str) {} + operator bool() const { return true; } + const char* value; +}; + +// A simple Linear Congruential Generator for generating random +// numbers with a uniform distribution. Unlike rand() and srand(), it +// doesn't use global state (and therefore can't interfere with user +// code). Unlike rand_r(), it's portable. An LCG isn't very random, +// but it's good enough for our purposes. +class GTEST_API_ Random { + public: + static const UInt32 kMaxRange = 1u << 31; + + explicit Random(UInt32 seed) : state_(seed) {} + + void Reseed(UInt32 seed) { state_ = seed; } + + // Generates a random number from [0, range). Crashes if 'range' is + // 0 or greater than kMaxRange. + UInt32 Generate(UInt32 range); + + private: + UInt32 state_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); +}; + +// Defining a variable of type CompileAssertTypesEqual will cause a +// compiler error iff T1 and T2 are different types. +template +struct CompileAssertTypesEqual; + +template +struct CompileAssertTypesEqual { +}; + +// Removes the reference from a type if it is a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::remove_reference, which is not widely available yet. +template +struct RemoveReference { typedef T type; }; // NOLINT +template +struct RemoveReference { typedef T type; }; // NOLINT + +// A handy wrapper around RemoveReference that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_REFERENCE_(T) \ + typename ::testing::internal::RemoveReference::type + +// Removes const from a type if it is a const type, otherwise leaves +// it unchanged. 
This is the same as tr1::remove_const, which is not +// widely available yet. +template +struct RemoveConst { typedef T type; }; // NOLINT +template +struct RemoveConst { typedef T type; }; // NOLINT + +// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above +// definition to fail to remove the const in 'const int[3]' and 'const +// char[3][4]'. The following specialization works around the bug. +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; + +#if defined(_MSC_VER) && _MSC_VER < 1400 +// This is the only specialization that allows VC++ 7.1 to remove const in +// 'const int[3] and 'const int[3][4]'. However, it causes trouble with GCC +// and thus needs to be conditionally compiled. +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; +#endif + +// A handy wrapper around RemoveConst that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_CONST_(T) \ + typename ::testing::internal::RemoveConst::type + +// Turns const U&, U&, const U, and U all into U. +#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \ + GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T)) + +// Adds reference to a type if it is not a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::add_reference, which is not widely available yet. +template +struct AddReference { typedef T& type; }; // NOLINT +template +struct AddReference { typedef T& type; }; // NOLINT + +// A handy wrapper around AddReference that works when the argument T +// depends on template parameters. +#define GTEST_ADD_REFERENCE_(T) \ + typename ::testing::internal::AddReference::type + +// Adds a reference to const on top of T as necessary. For example, +// it transforms +// +// char ==> const char& +// const char ==> const char& +// char& ==> const char& +// const char& ==> const char& +// +// The argument T must depend on some template parameters. 
+#define GTEST_REFERENCE_TO_CONST_(T) \ + GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T)) + +// ImplicitlyConvertible::value is a compile-time bool +// constant that's true iff type From can be implicitly converted to +// type To. +template +class ImplicitlyConvertible { + private: + // We need the following helper functions only for their types. + // They have no implementations. + + // MakeFrom() is an expression whose type is From. We cannot simply + // use From(), as the type From may not have a public default + // constructor. + static From MakeFrom(); + + // These two functions are overloaded. Given an expression + // Helper(x), the compiler will pick the first version if x can be + // implicitly converted to type To; otherwise it will pick the + // second version. + // + // The first version returns a value of size 1, and the second + // version returns a value of size 2. Therefore, by checking the + // size of Helper(x), which can be done at compile time, we can tell + // which version of Helper() is used, and hence whether x can be + // implicitly converted to type To. + static char Helper(To); + static char (&Helper(...))[2]; // NOLINT + + // We have to put the 'public' section after the 'private' section, + // or MSVC refuses to compile the code. + public: + // MSVC warns about implicitly converting from double to int for + // possible loss of data, so we need to temporarily disable the + // warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4244) // Temporarily disables warning 4244. + + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +# pragma warning(pop) // Restores the warning state. +#elif defined(__BORLANDC__) + // C++Builder cannot use member overload resolution during template + // instantiation. The simplest workaround is to use its C++0x type traits + // functions (C++Builder 2009 and above only). 
+ static const bool value = __is_convertible(From, To); +#else + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +#endif // _MSV_VER +}; +template +const bool ImplicitlyConvertible::value; + +// IsAProtocolMessage::value is a compile-time bool constant that's +// true iff T is type ProtocolMessage, proto2::Message, or a subclass +// of those. +template +struct IsAProtocolMessage + : public bool_constant< + ImplicitlyConvertible::value || + ImplicitlyConvertible::value> { +}; + +// When the compiler sees expression IsContainerTest(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. Therefore, we can determine whether C is +// a container class by checking the type of IsContainerTest(0). +// The value of the expression is insignificant. +// +// Note that we look for both C::iterator and C::const_iterator. The +// reason is that C++ injects the name of a class as a member of the +// class itself (e.g. you can refer to class iterator as either +// 'iterator' or 'iterator::iterator'). If we look for C::iterator +// only, for example, we would mistakenly think that a class named +// iterator is an STL container. +// +// Also note that the simpler approach of overloading +// IsContainerTest(typename C::const_iterator*) and +// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++. 
+typedef int IsContainer; +template +IsContainer IsContainerTest(int /* dummy */, + typename C::iterator* /* it */ = NULL, + typename C::const_iterator* /* const_it */ = NULL) { + return 0; +} + +typedef char IsNotContainer; +template +IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } + +// EnableIf::type is void when 'Cond' is true, and +// undefined when 'Cond' is false. To use SFINAE to make a function +// overload only apply when a particular expression is true, add +// "typename EnableIf::type* = 0" as the last parameter. +template struct EnableIf; +template<> struct EnableIf { typedef void type; }; // NOLINT + +// Utilities for native arrays. + +// ArrayEq() compares two k-dimensional native arrays using the +// elements' operator==, where k can be any integer >= 0. When k is +// 0, ArrayEq() degenerates into comparing a single pair of values. + +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs); + +// This generic version is used when k is 0. +template +inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } + +// This overload is used when k >= 1. +template +inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { + return internal::ArrayEq(lhs, N, rhs); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous ArrayEq() function, arrays with different sizes would +// lead to different copies of the template code. +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs) { + for (size_t i = 0; i != size; i++) { + if (!internal::ArrayEq(lhs[i], rhs[i])) + return false; + } + return true; +} + +// Finds the first element in the iterator range [begin, end) that +// equals elem. Element may be a native array type itself. 
+template +Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { + for (Iter it = begin; it != end; ++it) { + if (internal::ArrayEq(*it, elem)) + return it; + } + return end; +} + +// CopyArray() copies a k-dimensional native array using the elements' +// operator=, where k can be any integer >= 0. When k is 0, +// CopyArray() degenerates into copying a single value. + +template +void CopyArray(const T* from, size_t size, U* to); + +// This generic version is used when k is 0. +template +inline void CopyArray(const T& from, U* to) { *to = from; } + +// This overload is used when k >= 1. +template +inline void CopyArray(const T(&from)[N], U(*to)[N]) { + internal::CopyArray(from, N, *to); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous CopyArray() function, arrays with different sizes +// would lead to different copies of the template code. +template +void CopyArray(const T* from, size_t size, U* to) { + for (size_t i = 0; i != size; i++) { + internal::CopyArray(from[i], to + i); + } +} + +// The relation between an NativeArray object (see below) and the +// native array it represents. +enum RelationToSource { + kReference, // The NativeArray references the native array. + kCopy // The NativeArray makes a copy of the native array and + // owns the copy. +}; + +// Adapts a native array to a read-only STL-style container. Instead +// of the complete STL container concept, this adaptor only implements +// members useful for Google Mock's container matchers. New members +// should be added as needed. To simplify the implementation, we only +// support Element being a raw type (i.e. having no top-level const or +// reference modifier). It's the client's responsibility to satisfy +// this requirement. Element can be an array type itself (hence +// multi-dimensional arrays are supported). +template +class NativeArray { + public: + // STL-style container typedefs. 
+ typedef Element value_type; + typedef Element* iterator; + typedef const Element* const_iterator; + + // Constructs from a native array. + NativeArray(const Element* array, size_t count, RelationToSource relation) { + Init(array, count, relation); + } + + // Copy constructor. + NativeArray(const NativeArray& rhs) { + Init(rhs.array_, rhs.size_, rhs.relation_to_source_); + } + + ~NativeArray() { + // Ensures that the user doesn't instantiate NativeArray with a + // const or reference type. + static_cast(StaticAssertTypeEqHelper()); + if (relation_to_source_ == kCopy) + delete[] array_; + } + + // STL-style container methods. + size_t size() const { return size_; } + const_iterator begin() const { return array_; } + const_iterator end() const { return array_ + size_; } + bool operator==(const NativeArray& rhs) const { + return size() == rhs.size() && + ArrayEq(begin(), size(), rhs.begin()); + } + + private: + // Initializes this object; makes a copy of the input array if + // 'relation' is kCopy. 
+ void Init(const Element* array, size_t a_size, RelationToSource relation) { + if (relation == kReference) { + array_ = array; + } else { + Element* const copy = new Element[a_size]; + CopyArray(array, a_size, copy); + array_ = copy; + } + size_ = a_size; + relation_to_source_ = relation; + } + + const Element* array_; + size_t size_; + RelationToSource relation_to_source_; + + GTEST_DISALLOW_ASSIGN_(NativeArray); +}; + +} // namespace internal +} // namespace testing + +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) \ + = ::testing::Message() + +#define GTEST_MESSAGE_(message, result_type) \ + GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) + +#define GTEST_FATAL_FAILURE_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure) + +#define GTEST_NONFATAL_FAILURE_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure) + +#define GTEST_SUCCESS_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) + +// Suppresses MSVC warnings 4072 (unreachable code) for the code following +// statement if it returns or throws (or doesn't return or throw in some +// situations). +#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \ + if (::testing::internal::AlwaysTrue()) { statement; } + +#define GTEST_TEST_THROW_(statement, expected_exception, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::ConstCharPtr gtest_msg = "") { \ + bool gtest_caught_expected = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (expected_exception const&) { \ + gtest_caught_expected = true; \ + } \ + catch (...) 
{ \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws a different type."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + if (!gtest_caught_expected) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws nothing."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \ + fail(gtest_msg.value) + +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \ + fail("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: it throws.") + +#define GTEST_TEST_ANY_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + bool gtest_caught_any = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + gtest_caught_any = true; \ + } \ + if (!gtest_caught_any) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \ + fail("Expected: " #statement " throws an exception.\n" \ + " Actual: it doesn't.") + + +// Implements Boolean test assertions such as EXPECT_TRUE. expression can be +// either a boolean expression or an AssertionResult. text is a textual +// represenation of expression as it was passed into the EXPECT_TRUE. 
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar_ = \ + ::testing::AssertionResult(expression)) \ + ; \ + else \ + fail(::testing::internal::GetBoolAssertionFailureMessage(\ + gtest_ar_, text, #actual, #expected).c_str()) + +#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \ + fail("Expected: " #statement " doesn't generate new fatal " \ + "failures in the current thread.\n" \ + " Actual: it does.") + +// Expands to the name of the class that implements the given test. +#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + test_case_name##_##test_name##_Test + +// Helper macro for defining tests. 
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\ +class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\ + public:\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\ + private:\ + virtual void TestBody();\ + static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\ +};\ +\ +::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\ + ::test_info_ =\ + ::testing::internal::MakeAndRegisterTestInfo(\ + #test_case_name, #test_name, NULL, NULL, \ + (parent_id), \ + parent_class::SetUpTestCase, \ + parent_class::TearDownTestCase, \ + new ::testing::internal::TestFactoryImpl<\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\ +void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for death tests. It is +// #included by gtest.h so a user doesn't need to include this +// directly. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines internal utilities needed for implementing +// death tests. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + + +#include + +namespace testing { +namespace internal { + +GTEST_DECLARE_string_(internal_run_death_test); + +// Names of the flags (needed for parsing Google Test flags). +const char kDeathTestStyleFlag[] = "death_test_style"; +const char kDeathTestUseFork[] = "death_test_use_fork"; +const char kInternalRunDeathTestFlag[] = "internal_run_death_test"; + +#if GTEST_HAS_DEATH_TEST + +// DeathTest is a class that hides much of the complexity of the +// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method +// returns a concrete class that depends on the prevailing death test +// style, as defined by the --gtest_death_test_style and/or +// --gtest_internal_run_death_test flags. 
+ +// In describing the results of death tests, these terms are used with +// the corresponding definitions: +// +// exit status: The integer exit information in the format specified +// by wait(2) +// exit code: The integer code passed to exit(3), _exit(2), or +// returned from main() +class GTEST_API_ DeathTest { + public: + // Create returns false if there was an error determining the + // appropriate action to take for the current death test; for example, + // if the gtest_death_test_style flag is set to an invalid value. + // The LastMessage method will return a more detailed message in that + // case. Otherwise, the DeathTest pointer pointed to by the "test" + // argument is set. If the death test should be skipped, the pointer + // is set to NULL; otherwise, it is set to the address of a new concrete + // DeathTest object that controls the execution of the current test. + static bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); + DeathTest(); + virtual ~DeathTest() { } + + // A helper class that aborts a death test when it's deleted. + class ReturnSentinel { + public: + explicit ReturnSentinel(DeathTest* test) : test_(test) { } + ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: + DeathTest* const test_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + } GTEST_ATTRIBUTE_UNUSED_; + + // An enumeration of possible roles that may be taken when a death + // test is encountered. EXECUTE means that the death test logic should + // be executed immediately. OVERSEE means that the program should prepare + // the appropriate environment for a child process to execute the death + // test, then wait for it to complete. + enum TestRole { OVERSEE_TEST, EXECUTE_TEST }; + + // An enumeration of the three reasons that a test might be aborted. + enum AbortReason { + TEST_ENCOUNTERED_RETURN_STATEMENT, + TEST_THREW_EXCEPTION, + TEST_DID_NOT_DIE + }; + + // Assumes one of the above roles. 
+ virtual TestRole AssumeRole() = 0; + + // Waits for the death test to finish and returns its status. + virtual int Wait() = 0; + + // Returns true if the death test passed; that is, the test process + // exited during the test, its exit status matches a user-supplied + // predicate, and its stderr output matches a user-supplied regular + // expression. + // The user-supplied predicate may be a macro expression rather + // than a function pointer or functor, or else Wait and Passed could + // be combined. + virtual bool Passed(bool exit_status_ok) = 0; + + // Signals that the death test did not die as expected. + virtual void Abort(AbortReason reason) = 0; + + // Returns a human-readable outcome message regarding the outcome of + // the last death test. + static const char* LastMessage(); + + static void set_last_death_test_message(const std::string& message); + + private: + // A string containing a description of the outcome of the last death test. + static std::string last_death_test_message_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); +}; + +// Factory interface for death tests. May be mocked out for testing. +class DeathTestFactory { + public: + virtual ~DeathTestFactory() { } + virtual bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) = 0; +}; + +// A concrete DeathTestFactory implementation for normal use. +class DefaultDeathTestFactory : public DeathTestFactory { + public: + virtual bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); +}; + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +GTEST_API_ bool ExitedUnsuccessfully(int exit_status); + +// Traps C++ exceptions escaping statement and reports them as test +// failures. Note that trapping SEH exceptions is not implemented here. 
+# if GTEST_HAS_EXCEPTIONS +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (const ::std::exception& gtest_exception) { \ + fprintf(\ + stderr, \ + "\n%s: Caught std::exception-derived exception escaping the " \ + "death test statement. Exception message: %s\n", \ + ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \ + gtest_exception.what()); \ + fflush(stderr); \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } catch (...) { \ + death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \ + } + +# else +# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) + +# endif + +// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*, +// ASSERT_EXIT*, and EXPECT_EXIT*. +# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + const ::testing::internal::RE& gtest_regex = (regex); \ + ::testing::internal::DeathTest* gtest_dt; \ + if (!::testing::internal::DeathTest::Create(#statement, >est_regex, \ + __FILE__, __LINE__, >est_dt)) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + if (gtest_dt != NULL) { \ + ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \ + gtest_dt_ptr(gtest_dt); \ + switch (gtest_dt->AssumeRole()) { \ + case ::testing::internal::DeathTest::OVERSEE_TEST: \ + if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \ + } \ + break; \ + case ::testing::internal::DeathTest::EXECUTE_TEST: { \ + ::testing::internal::DeathTest::ReturnSentinel \ + gtest_sentinel(gtest_dt); \ + GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \ + gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \ + break; \ + } \ + default: \ + break; \ 
+ } \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \ + fail(::testing::internal::DeathTest::LastMessage()) +// The symbol "fail" here expands to something into which a message +// can be streamed. + +// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in +// NDEBUG mode. In this case we need the statements to be executed, the regex is +// ignored, and the macro must accept a streamed message even though the message +// is never printed. +# define GTEST_EXECUTE_STATEMENT_(statement, regex) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } else \ + ::testing::Message() + +// A class representing the parsed contents of the +// --gtest_internal_run_death_test flag, as it existed when +// RUN_ALL_TESTS was called. +class InternalRunDeathTestFlag { + public: + InternalRunDeathTestFlag(const std::string& a_file, + int a_line, + int an_index, + int a_write_fd) + : file_(a_file), line_(a_line), index_(an_index), + write_fd_(a_write_fd) {} + + ~InternalRunDeathTestFlag() { + if (write_fd_ >= 0) + posix::Close(write_fd_); + } + + const std::string& file() const { return file_; } + int line() const { return line_; } + int index() const { return index_; } + int write_fd() const { return write_fd_; } + + private: + std::string file_; + int line_; + int index_; + int write_fd_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); +}; + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag(); + +#else // GTEST_HAS_DEATH_TEST + +// This macro is used for implementing macros such as +// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where +// death tests are not supported. 
Those macros must compile on such systems +// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on +// systems that support death tests. This allows one to write such a macro +// on a system that does not support death tests and be sure that it will +// compile on a death-test supporting system. +// +// Parameters: +// statement - A statement that a macro such as EXPECT_DEATH would test +// for program termination. This macro has to make sure this +// statement is compiled but not executed, to ensure that +// EXPECT_DEATH_IF_SUPPORTED compiles with a certain +// parameter iff EXPECT_DEATH compiles with it. +// regex - A regex that a macro such as EXPECT_DEATH would use to test +// the output of statement. This parameter has to be +// compiled but not evaluated by this macro, to ensure that +// this macro only accepts expressions that a macro such as +// EXPECT_DEATH would accept. +// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED +// and a return statement for ASSERT_DEATH_IF_SUPPORTED. +// This ensures that ASSERT_DEATH_IF_SUPPORTED will not +// compile inside functions where ASSERT_DEATH doesn't +// compile. +// +// The branch that has an always false condition is used to ensure that +// statement and regex are compiled (and thus syntactically correct) but +// never executed. The unreachable code macro protects the terminator +// statement from generating an 'unreachable code' warning in case +// statement unconditionally returns or throws. The Message constructor at +// the end allows the syntax of streaming additional messages into the +// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. 
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_LOG_(WARNING) \ + << "Death tests are not supported on this platform.\n" \ + << "Statement '" #statement "' cannot be verified."; \ + } else if (::testing::internal::AlwaysFalse()) { \ + ::testing::internal::RE::PartialMatch(".*", (regex)); \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + terminator; \ + } else \ + ::testing::Message() + +#endif // GTEST_HAS_DEATH_TEST + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + +namespace testing { + +// This flag controls the style of death tests. Valid values are "threadsafe", +// meaning that the death test child process will re-execute the test binary +// from the start, running only a single death test, or "fast", +// meaning that the child process will execute the test logic immediately +// after forking. +GTEST_DECLARE_string_(death_test_style); + +#if GTEST_HAS_DEATH_TEST + +namespace internal { + +// Returns a Boolean value indicating whether the caller is currently +// executing in the context of the death test child process. Tools such as +// Valgrind heap checkers may need this to modify their behavior in death +// tests. IMPORTANT: This is an internal utility. Using it may break the +// implementation of death tests. User code MUST NOT use it. +GTEST_API_ bool InDeathTestChild(); + +} // namespace internal + +// The following macros are useful for writing death tests. + +// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is +// executed: +// +// 1. It generates a warning if there is more than one active +// thread. This is because it's safe to fork() or clone() only +// when there is a single thread. +// +// 2. 
The parent process clone()s a sub-process and runs the death +// test in it; the sub-process exits with code 0 at the end of the +// death test, if it hasn't exited already. +// +// 3. The parent process waits for the sub-process to terminate. +// +// 4. The parent process checks the exit code and error message of +// the sub-process. +// +// Examples: +// +// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number"); +// for (int i = 0; i < 5; i++) { +// EXPECT_DEATH(server.ProcessRequest(i), +// "Invalid request .* in ProcessRequest()") +// << "Failed to die on request " << i; +// } +// +// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting"); +// +// bool KilledBySIGHUP(int exit_code) { +// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP; +// } +// +// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!"); +// +// On the regular expressions used in death tests: +// +// On POSIX-compliant systems (*nix), we use the library, +// which uses the POSIX extended regex syntax. +// +// On other platforms (e.g. Windows), we only support a simple regex +// syntax implemented as part of Google Test. This limited +// implementation should be enough most of the time when writing +// death tests; though it lacks many features you can find in PCRE +// or POSIX extended regex syntax. For example, we don't support +// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and +// repetition count ("x{5,7}"), among others. +// +// Below is the syntax that we do support. We chose it to be a +// subset of both PCRE and POSIX extended regex, so it's easy to +// learn wherever you come from. In the following: 'A' denotes a +// literal character, period (.), or a single \\ escape sequence; +// 'x' and 'y' denote regular expressions; 'm' and 'n' are for +// natural numbers. 
+// +// c matches any literal character c +// \\d matches any decimal digit +// \\D matches any character that's not a decimal digit +// \\f matches \f +// \\n matches \n +// \\r matches \r +// \\s matches any ASCII whitespace, including \n +// \\S matches any character that's not a whitespace +// \\t matches \t +// \\v matches \v +// \\w matches any letter, _, or decimal digit +// \\W matches any character that \\w doesn't match +// \\c matches any literal character c, which must be a punctuation +// . matches any single character except \n +// A? matches 0 or 1 occurrences of A +// A* matches 0 or many occurrences of A +// A+ matches 1 or many occurrences of A +// ^ matches the beginning of a string (not that of each line) +// $ matches the end of a string (not that of each line) +// xy matches x followed by y +// +// If you accidentally use PCRE or POSIX extended regex features +// not implemented by us, you will get a run-time failure. In that +// case, please try to rewrite your regular expression within the +// above syntax. +// +// This implementation is *not* meant to be as highly tuned or robust +// as a compiled regex library, but should perform well enough for a +// death test, which already incurs significant overhead by launching +// a child process. +// +// Known caveats: +// +// A "threadsafe" style death test obtains the path to the test +// program from argv[0] and re-executes it in the sub-process. For +// simplicity, the current implementation doesn't search the PATH +// when launching the sub-process. This means that the user must +// invoke the test program via a path that contains at least one +// path separator (e.g. path/to/foo_test and +// /absolute/path/to/bar_test are fine, but foo_test is not). This +// is rarely a problem as people usually don't put the test binary +// directory in PATH. +// +// TODO(wan@google.com): make thread-safe death tests search the PATH. 
+ +// Asserts that a given statement causes the program to exit, with an +// integer exit status that satisfies predicate, and emitting error output +// that matches regex. +# define ASSERT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_) + +// Like ASSERT_EXIT, but continues on to successive tests in the +// test case, if any: +# define EXPECT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_) + +// Asserts that a given statement causes the program to exit, either by +// explicitly exiting with a nonzero exit code or being killed by a +// signal, and emitting error output that matches regex. +# define ASSERT_DEATH(statement, regex) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Like ASSERT_DEATH, but continues on to successive tests in the +// test case, if any: +# define EXPECT_DEATH(statement, regex) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: + +// Tests that an exit code describes a normal exit with a given exit code. +class GTEST_API_ ExitedWithCode { + public: + explicit ExitedWithCode(int exit_code); + bool operator()(int exit_status) const; + private: + // No implementation - assignment is unsupported. + void operator=(const ExitedWithCode& other); + + const int exit_code_; +}; + +# if !GTEST_OS_WINDOWS +// Tests that an exit code describes an exit due to termination by a +// given signal. +class GTEST_API_ KilledBySignal { + public: + explicit KilledBySignal(int signum); + bool operator()(int exit_status) const; + private: + const int signum_; +}; +# endif // !GTEST_OS_WINDOWS + +// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. 
+// The death testing framework causes this to have interesting semantics, +// since the sideeffects of the call are only visible in opt mode, and not +// in debug mode. +// +// In practice, this can be used to test functions that utilize the +// LOG(DFATAL) macro using the following style: +// +// int DieInDebugOr12(int* sideeffect) { +// if (sideeffect) { +// *sideeffect = 12; +// } +// LOG(DFATAL) << "death"; +// return 12; +// } +// +// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) { +// int sideeffect = 0; +// // Only asserts in dbg. +// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death"); +// +// #ifdef NDEBUG +// // opt-mode has sideeffect visible. +// EXPECT_EQ(12, sideeffect); +// #else +// // dbg-mode no visible sideeffect. +// EXPECT_EQ(0, sideeffect); +// #endif +// } +// +// This will assert that DieInDebugReturn12InOpt() crashes in debug +// mode, usually due to a DCHECK or LOG(DFATAL), but returns the +// appropriate fallback value (12 in this case) in opt mode. If you +// need to test that a function has appropriate side-effects in opt +// mode, include assertions against the side-effects. A general +// pattern for this is: +// +// EXPECT_DEBUG_DEATH({ +// // Side-effects here will have an effect after this statement in +// // opt mode, but none in debug mode. 
+// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); +// }, "death"); +// +# ifdef NDEBUG + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# else + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + EXPECT_DEATH(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + ASSERT_DEATH(statement, regex) + +# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // GTEST_HAS_DEATH_TEST + +// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and +// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if +// death tests are supported; otherwise they just issue a warning. This is +// useful when you are combining death test assertions with normal test +// assertions in one test. +#if GTEST_HAS_DEATH_TEST +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) +#else +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, ) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return) +#endif + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +// This file was GENERATED by command: +// pump.py gtest-param-test.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: vladl@google.com (Vlad Losev) +// +// Macros and functions for implementing parameterized tests +// in Google C++ Testing Framework (Google Test) +// +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ + + +// Value-parameterized tests allow you to test your code with different +// parameters without writing multiple copies of the same test. +// +// Here is how you use value-parameterized tests: + +#if 0 + +// To write value-parameterized tests, first you should define a fixture +// class. 
It is usually derived from testing::TestWithParam (see below for +// another inheritance scheme that's sometimes useful in more complicated +// class hierarchies), where the type of your parameter values. +// TestWithParam is itself derived from testing::Test. T can be any +// copyable type. If it's a raw pointer, you are responsible for managing the +// lifespan of the pointed values. + +class FooTest : public ::testing::TestWithParam { + // You can implement all the usual class fixture members here. +}; + +// Then, use the TEST_P macro to define as many parameterized tests +// for this fixture as you want. The _P suffix is for "parameterized" +// or "pattern", whichever you prefer to think. + +TEST_P(FooTest, DoesBlah) { + // Inside a test, access the test parameter with the GetParam() method + // of the TestWithParam class: + EXPECT_TRUE(foo.Blah(GetParam())); + ... +} + +TEST_P(FooTest, HasBlahBlah) { + ... +} + +// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test +// case with any set of parameters you want. Google Test defines a number +// of functions for generating test parameters. They return what we call +// (surprise!) parameter generators. Here is a summary of them, which +// are all in the testing namespace: +// +// +// Range(begin, end [, step]) - Yields values {begin, begin+step, +// begin+step+step, ...}. The values do not +// include end. step defaults to 1. +// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}. +// ValuesIn(container) - Yields values from a C-style array, an STL +// ValuesIn(begin,end) container, or an iterator range [begin, end). +// Bool() - Yields sequence {false, true}. +// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product +// for the math savvy) of the values generated +// by the N generators. +// +// For more details, see comments at the definitions of these functions below +// in this file. 
+// +// The following statement will instantiate tests from the FooTest test case +// each with parameter values "meeny", "miny", and "moe". + +INSTANTIATE_TEST_CASE_P(InstantiationName, + FooTest, + Values("meeny", "miny", "moe")); + +// To distinguish different instances of the pattern, (yes, you +// can instantiate it more then once) the first argument to the +// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the +// actual test case name. Remember to pick unique prefixes for different +// instantiations. The tests from the instantiation above will have +// these names: +// +// * InstantiationName/FooTest.DoesBlah/0 for "meeny" +// * InstantiationName/FooTest.DoesBlah/1 for "miny" +// * InstantiationName/FooTest.DoesBlah/2 for "moe" +// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny" +// * InstantiationName/FooTest.HasBlahBlah/1 for "miny" +// * InstantiationName/FooTest.HasBlahBlah/2 for "moe" +// +// You can use these names in --gtest_filter. +// +// This statement will instantiate all tests from FooTest again, each +// with parameter values "cat" and "dog": + +const char* pets[] = {"cat", "dog"}; +INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets)); + +// The tests from the instantiation above will have these names: +// +// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat" +// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog" +// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat" +// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog" +// +// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests +// in the given test case, whether their definitions come before or +// AFTER the INSTANTIATE_TEST_CASE_P statement. +// +// Please also note that generator expressions (including parameters to the +// generators) are evaluated in InitGoogleTest(), after main() has started. 
+// This allows the user on one hand, to adjust generator parameters in order +// to dynamically determine a set of tests to run and on the other hand, +// give the user a chance to inspect the generated tests with Google Test +// reflection API before RUN_ALL_TESTS() is executed. +// +// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc +// for more examples. +// +// In the future, we plan to publish the API for defining new parameter +// generators. But for now this interface remains part of the internal +// implementation and is subject to change. +// +// +// A parameterized test fixture must be derived from testing::Test and from +// testing::WithParamInterface, where T is the type of the parameter +// values. Inheriting from TestWithParam satisfies that requirement because +// TestWithParam inherits from both Test and WithParamInterface. In more +// complicated hierarchies, however, it is occasionally useful to inherit +// separately from Test and WithParamInterface. For example: + +class BaseTest : public ::testing::Test { + // You can inherit all the usual members for a non-parameterized test + // fixture here. +}; + +class DerivedTest : public BaseTest, public ::testing::WithParamInterface { + // The usual test fixture members go here too. +}; + +TEST_F(BaseTest, HasFoo) { + // This is an ordinary non-parameterized test. +} + +TEST_P(DerivedTest, DoesBlah) { + // GetParam works just the same here as if you inherit from TestWithParam. + EXPECT_TRUE(foo.Blah(GetParam())); +} + +#endif // 0 + + +#if !GTEST_OS_SYMBIAN +# include +#endif + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// Type and function utilities for implementing parameterized tests. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ + +#include +#include +#include + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. +// Copyright 2003 Google Inc. 
+// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: Dan Egnor (egnor@google.com) +// +// A "smart" pointer type with reference tracking. Every pointer to a +// particular object is kept on a circular linked list. When the last pointer +// to an object is destroyed or reassigned, the object is deleted. +// +// Used properly, this deletes the object when the last reference goes away. +// There are several caveats: +// - Like all reference counting schemes, cycles lead to leaks. 
+// - Each smart pointer is actually two pointers (8 bytes instead of 4). +// - Every time a pointer is assigned, the entire list of pointers to that +// object is traversed. This class is therefore NOT SUITABLE when there +// will often be more than two or three pointers to a particular object. +// - References are only tracked as long as linked_ptr<> objects are copied. +// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS +// will happen (double deletion). +// +// A good use of this class is storing object references in STL containers. +// You can safely put linked_ptr<> in a vector<>. +// Other uses may not be as good. +// +// Note: If you use an incomplete type with linked_ptr<>, the class +// *containing* linked_ptr<> must have a constructor and destructor (even +// if they do nothing!). +// +// Bill Gibbons suggested we use something like this. +// +// Thread Safety: +// Unlike other linked_ptr implementations, in this implementation +// a linked_ptr object is thread-safe in the sense that: +// - it's safe to copy linked_ptr objects concurrently, +// - it's safe to copy *from* a linked_ptr and read its underlying +// raw pointer (e.g. via get()) concurrently, and +// - it's safe to write to two linked_ptrs that point to the same +// shared object concurrently. +// TODO(wan@google.com): rename this to safe_linked_ptr to avoid +// confusion with normal linked_ptr. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ + +#include +#include + + +namespace testing { +namespace internal { + +// Protects copying of all linked_ptr objects. +GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// This is used internally by all instances of linked_ptr<>. It needs to be +// a non-template class because different types of linked_ptr<> can refer to +// the same object (linked_ptr(obj) vs linked_ptr(obj)). 
+// So, it needs to be possible for different types of linked_ptr to participate +// in the same circular linked list, so we need a single class type here. +// +// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr. +class linked_ptr_internal { + public: + // Create a new circle that includes only this instance. + void join_new() { + next_ = this; + } + + // Many linked_ptr operations may change p.link_ for some linked_ptr + // variable p in the same circle as this object. Therefore we need + // to prevent two such operations from occurring concurrently. + // + // Note that different types of linked_ptr objects can coexist in a + // circle (e.g. linked_ptr, linked_ptr, and + // linked_ptr). Therefore we must use a single mutex to + // protect all linked_ptr objects. This can create serious + // contention in production code, but is acceptable in a testing + // framework. + + // Join an existing circle. + void join(linked_ptr_internal const* ptr) + GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) { + MutexLock lock(&g_linked_ptr_mutex); + + linked_ptr_internal const* p = ptr; + while (p->next_ != ptr) p = p->next_; + p->next_ = this; + next_ = ptr; + } + + // Leave whatever circle we're part of. Returns true if we were the + // last member of the circle. Once this is done, you can join() another. + bool depart() + GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) { + MutexLock lock(&g_linked_ptr_mutex); + + if (next_ == this) return true; + linked_ptr_internal const* p = next_; + while (p->next_ != this) p = p->next_; + p->next_ = next_; + return false; + } + + private: + mutable linked_ptr_internal const* next_; +}; + +template +class linked_ptr { + public: + typedef T element_type; + + // Take over ownership of a raw pointer. This should happen as soon as + // possible after the object is created. + explicit linked_ptr(T* ptr = NULL) { capture(ptr); } + ~linked_ptr() { depart(); } + + // Copy an existing linked_ptr<>, adding ourselves to the list of references. 
+ template linked_ptr(linked_ptr const& ptr) { copy(&ptr); } + linked_ptr(linked_ptr const& ptr) { // NOLINT + assert(&ptr != this); + copy(&ptr); + } + + // Assignment releases the old value and acquires the new. + template linked_ptr& operator=(linked_ptr const& ptr) { + depart(); + copy(&ptr); + return *this; + } + + linked_ptr& operator=(linked_ptr const& ptr) { + if (&ptr != this) { + depart(); + copy(&ptr); + } + return *this; + } + + // Smart pointer members. + void reset(T* ptr = NULL) { + depart(); + capture(ptr); + } + T* get() const { return value_; } + T* operator->() const { return value_; } + T& operator*() const { return *value_; } + + bool operator==(T* p) const { return value_ == p; } + bool operator!=(T* p) const { return value_ != p; } + template + bool operator==(linked_ptr const& ptr) const { + return value_ == ptr.get(); + } + template + bool operator!=(linked_ptr const& ptr) const { + return value_ != ptr.get(); + } + + private: + template + friend class linked_ptr; + + T* value_; + linked_ptr_internal link_; + + void depart() { + if (link_.depart()) delete value_; + } + + void capture(T* ptr) { + value_ = ptr; + link_.join_new(); + } + + template void copy(linked_ptr const* ptr) { + value_ = ptr->get(); + if (value_) + link_.join(&ptr->link_); + else + link_.join_new(); + } +}; + +template inline +bool operator==(T* ptr, const linked_ptr& x) { + return ptr == x.get(); +} + +template inline +bool operator!=(T* ptr, const linked_ptr& x) { + return ptr != x.get(); +} + +// A function to convert T* into linked_ptr +// Doing e.g. make_linked_ptr(new FooBarBaz(arg)) is a shorter notation +// for linked_ptr >(new FooBarBaz(arg)) +template +linked_ptr make_linked_ptr(T* ptr) { + return linked_ptr(ptr); +} + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_ +// Copyright 2007, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// A user can teach this function how to print a class type T by +// defining either operator<<() or PrintTo() in the namespace that +// defines T. 
More specifically, the FIRST defined function in the +// following list will be used (assuming T is defined in namespace +// foo): +// +// 1. foo::PrintTo(const T&, ostream*) +// 2. operator<<(ostream&, const T&) defined in either foo or the +// global namespace. +// +// If none of the above is defined, it will print the debug string of +// the value if it is a protocol buffer, or print the raw bytes in the +// value otherwise. +// +// To aid debugging: when T is a reference type, the address of the +// value is also printed; when T is a (const) char pointer, both the +// pointer value and the NUL-terminated string it points to are +// printed. +// +// We also provide some convenient wrappers: +// +// // Prints a value to a string. For a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// std::string ::testing::PrintToString(const T& value); +// +// // Prints a value tersely: for a reference type, the referenced +// // value (but not the address) is printed; for a (const or not) char +// // pointer, the NUL-terminated string (but not the pointer) is +// // printed. +// void ::testing::internal::UniversalTersePrint(const T& value, ostream*); +// +// // Prints value using the type inferred by the compiler. The difference +// // from UniversalTersePrint() is that this function prints both the +// // pointer and the NUL-terminated string for a (const or not) char pointer. +// void ::testing::internal::UniversalPrint(const T& value, ostream*); +// +// // Prints the fields of a tuple tersely to a string vector, one +// // element for each field. Tuple support must be enabled in +// // gtest-port.h. +// std::vector UniversalTersePrintTupleFieldsToStrings( +// const Tuple& value); +// +// Known limitation: +// +// The print primitives print the elements of an STL-style container +// using the compiler-inferred type of *iter where iter is a +// const_iterator of the container. 
When const_iterator is an input +// iterator but not a forward iterator, this inferred type may not +// match value_type, and the print output may be incorrect. In +// practice, this is rarely a problem as for most containers +// const_iterator is a forward iterator. We'll fix this if there's an +// actual need for it. Note that this fix cannot rely on value_type +// being defined as many user-defined container types don't have +// value_type. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#include // NOLINT +#include +#include +#include +#include + +namespace testing { + +// Definitions in the 'internal' and 'internal2' name spaces are +// subject to change without notice. DO NOT USE THEM IN USER CODE! +namespace internal2 { + +// Prints the given number of bytes in the given object to the given +// ostream. +GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes, + size_t count, + ::std::ostream* os); + +// For selecting which printer to use when a given type has neither << +// nor PrintTo(). +enum TypeKind { + kProtobuf, // a protobuf type + kConvertibleToInteger, // a type implicitly convertible to BiggestInt + // (e.g. a named or unnamed enum type) + kOtherType // anything else +}; + +// TypeWithoutFormatter::PrintValue(value, os) is called +// by the universal printer to print a value of type T when neither +// operator<< nor PrintTo() is defined for T, where kTypeKind is the +// "kind" of T as defined by enum TypeKind. +template +class TypeWithoutFormatter { + public: + // This default version is called when kTypeKind is kOtherType. + static void PrintValue(const T& value, ::std::ostream* os) { + PrintBytesInObjectTo(reinterpret_cast(&value), + sizeof(value), os); + } +}; + +// We print a protobuf using its ShortDebugString() when the string +// doesn't exceed this many characters; otherwise we print it using +// DebugString() for better readability. 
+const size_t kProtobufOneLinerMaxLength = 50; + +template +class TypeWithoutFormatter { + public: + static void PrintValue(const T& value, ::std::ostream* os) { + const ::testing::internal::string short_str = value.ShortDebugString(); + const ::testing::internal::string pretty_str = + short_str.length() <= kProtobufOneLinerMaxLength ? + short_str : ("\n" + value.DebugString()); + *os << ("<" + pretty_str + ">"); + } +}; + +template +class TypeWithoutFormatter { + public: + // Since T has no << operator or PrintTo() but can be implicitly + // converted to BiggestInt, we print it as a BiggestInt. + // + // Most likely T is an enum type (either named or unnamed), in which + // case printing it as an integer is the desired behavior. In case + // T is not an enum, printing it as an integer is the best we can do + // given that it has no user-defined printer. + static void PrintValue(const T& value, ::std::ostream* os) { + const internal::BiggestInt kBigInt = value; + *os << kBigInt; + } +}; + +// Prints the given value to the given ostream. If the value is a +// protocol message, its debug string is printed; if it's an enum or +// of a type implicitly convertible to BiggestInt, it's printed as an +// integer; otherwise the bytes in the value are printed. This is +// what UniversalPrinter::Print() does when it knows nothing about +// type T and T has neither << operator nor PrintTo(). +// +// A user can override this behavior for a class type Foo by defining +// a << operator in the namespace where Foo is defined. +// +// We put this operator in namespace 'internal2' instead of 'internal' +// to simplify the implementation, as much code in 'internal' needs to +// use << in STL, which would conflict with our own << were it defined +// in 'internal'. +// +// Note that this operator<< takes a generic std::basic_ostream type instead of the more restricted std::ostream. 
If +// we define it to take an std::ostream instead, we'll get an +// "ambiguous overloads" compiler error when trying to print a type +// Foo that supports streaming to std::basic_ostream, as the compiler cannot tell whether +// operator<<(std::ostream&, const T&) or +// operator<<(std::basic_stream, const Foo&) is more +// specific. +template +::std::basic_ostream& operator<<( + ::std::basic_ostream& os, const T& x) { + TypeWithoutFormatter::value ? kProtobuf : + internal::ImplicitlyConvertible::value ? + kConvertibleToInteger : kOtherType)>::PrintValue(x, &os); + return os; +} + +} // namespace internal2 +} // namespace testing + +// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up +// magic needed for implementing UniversalPrinter won't work. +namespace testing_internal { + +// Used to print a value that is not an STL-style container when the +// user doesn't define PrintTo() for it. +template +void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) { + // With the following statement, during unqualified name lookup, + // testing::internal2::operator<< appears as if it was declared in + // the nearest enclosing namespace that contains both + // ::testing_internal and ::testing::internal2, i.e. the global + // namespace. For more details, refer to the C++ Standard section + // 7.3.4-1 [namespace.udir]. This allows us to fall back onto + // testing::internal2::operator<< in case T doesn't come with a << + // operator. + // + // We cannot write 'using ::testing::internal2::operator<<;', which + // gcc 3.3 fails to compile due to a compiler bug. + using namespace ::testing::internal2; // NOLINT + + // Assuming T is defined in namespace foo, in the next statement, + // the compiler will consider all of: + // + // 1. foo::operator<< (thanks to Koenig look-up), + // 2. ::operator<< (as the current namespace is enclosed in ::), + // 3. testing::internal2::operator<< (thanks to the using statement above). 
+ // + // The operator<< whose type matches T best will be picked. + // + // We deliberately allow #2 to be a candidate, as sometimes it's + // impossible to define #1 (e.g. when foo is ::std, defining + // anything in it is undefined behavior unless you are a compiler + // vendor.). + *os << value; +} + +} // namespace testing_internal + +namespace testing { +namespace internal { + +// UniversalPrinter::Print(value, ostream_ptr) prints the given +// value to the given ostream. The caller must ensure that +// 'ostream_ptr' is not NULL, or the behavior is undefined. +// +// We define UniversalPrinter as a class template (as opposed to a +// function template), as we need to partially specialize it for +// reference types, which cannot be done with function templates. +template +class UniversalPrinter; + +template +void UniversalPrint(const T& value, ::std::ostream* os); + +// Used to print an STL-style container when the user doesn't define +// a PrintTo() for it. +template +void DefaultPrintTo(IsContainer /* dummy */, + false_type /* is not a pointer */, + const C& container, ::std::ostream* os) { + const size_t kMaxCount = 32; // The maximum number of elements to print. + *os << '{'; + size_t count = 0; + for (typename C::const_iterator it = container.begin(); + it != container.end(); ++it, ++count) { + if (count > 0) { + *os << ','; + if (count == kMaxCount) { // Enough has been printed. + *os << " ..."; + break; + } + } + *os << ' '; + // We cannot call PrintTo(*it, os) here as PrintTo() doesn't + // handle *it being a native array. + internal::UniversalPrint(*it, os); + } + + if (count > 0) { + *os << ' '; + } + *os << '}'; +} + +// Used to print a pointer that is neither a char pointer nor a member +// pointer, when the user doesn't define PrintTo() for it. (A member +// variable pointer or member function pointer doesn't really point to +// a location in the address space. Their representation is +// implementation-defined. 
Therefore they will be printed as raw +// bytes.) +template +void DefaultPrintTo(IsNotContainer /* dummy */, + true_type /* is a pointer */, + T* p, ::std::ostream* os) { + if (p == NULL) { + *os << "NULL"; + } else { + // C++ doesn't allow casting from a function pointer to any object + // pointer. + // + // IsTrue() silences warnings: "Condition is always true", + // "unreachable code". + if (IsTrue(ImplicitlyConvertible::value)) { + // T is not a function type. We just call << to print p, + // relying on ADL to pick up user-defined << for their pointer + // types, if any. + *os << p; + } else { + // T is a function type, so '*os << p' doesn't do what we want + // (it just prints p as bool). We want to print p as a const + // void*. However, we cannot cast it to const void* directly, + // even using reinterpret_cast, as earlier versions of gcc + // (e.g. 3.4.5) cannot compile the cast when p is a function + // pointer. Casting to UInt64 first solves the problem. + *os << reinterpret_cast( + reinterpret_cast(p)); + } + } +} + +// Used to print a non-container, non-pointer value when the user +// doesn't define PrintTo() for it. +template +void DefaultPrintTo(IsNotContainer /* dummy */, + false_type /* is not a pointer */, + const T& value, ::std::ostream* os) { + ::testing_internal::DefaultPrintNonContainerTo(value, os); +} + +// Prints the given value using the << operator if it has one; +// otherwise prints the bytes in it. This is what +// UniversalPrinter::Print() does when PrintTo() is not specialized +// or overloaded for type T. +// +// A user can override this behavior for a class type Foo by defining +// an overload of PrintTo() in the namespace where Foo is defined. We +// give the user this option as sometimes defining a << operator for +// Foo is not desirable (e.g. the coding style may prevent doing it, +// or there is already a << operator but it doesn't do what the user +// wants). 
+template +void PrintTo(const T& value, ::std::ostream* os) { + // DefaultPrintTo() is overloaded. The type of its first two + // arguments determine which version will be picked. If T is an + // STL-style container, the version for container will be called; if + // T is a pointer, the pointer version will be called; otherwise the + // generic version will be called. + // + // Note that we check for container types here, prior to we check + // for protocol message types in our operator<<. The rationale is: + // + // For protocol messages, we want to give people a chance to + // override Google Mock's format by defining a PrintTo() or + // operator<<. For STL containers, other formats can be + // incompatible with Google Mock's format for the container + // elements; therefore we check for container types here to ensure + // that our format is used. + // + // The second argument of DefaultPrintTo() is needed to bypass a bug + // in Symbian's C++ compiler that prevents it from picking the right + // overload between: + // + // PrintTo(const T& x, ...); + // PrintTo(T* x, ...); + DefaultPrintTo(IsContainerTest(0), is_pointer(), value, os); +} + +// The following list of PrintTo() overloads tells +// UniversalPrinter::Print() how to print standard types (built-in +// types, strings, plain arrays, and pointers). + +// Overloads for various char types. +GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os); +GTEST_API_ void PrintTo(signed char c, ::std::ostream* os); +inline void PrintTo(char c, ::std::ostream* os) { + // When printing a plain char, we always treat it as unsigned. This + // way, the output won't be affected by whether the compiler thinks + // char is signed or not. + PrintTo(static_cast(c), os); +} + +// Overloads for other simple built-in types. +inline void PrintTo(bool x, ::std::ostream* os) { + *os << (x ? "true" : "false"); +} + +// Overload for wchar_t type. 
+// Prints a wchar_t as a symbol if it is printable or as its internal +// code otherwise and also as its decimal code (except for L'\0'). +// The L'\0' char is printed as "L'\\0'". The decimal code is printed +// as signed integer when wchar_t is implemented by the compiler +// as a signed type and is printed as an unsigned integer when wchar_t +// is implemented as an unsigned type. +GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os); + +// Overloads for C strings. +GTEST_API_ void PrintTo(const char* s, ::std::ostream* os); +inline void PrintTo(char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// signed/unsigned char is often used for representing binary data, so +// we print pointers to it as void* to be safe. +inline void PrintTo(const signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(signed char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(const unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +inline void PrintTo(unsigned char* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} + +// MSVC can be configured to define wchar_t as a typedef of unsigned +// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native +// type. When wchar_t is a typedef, defining an overload for const +// wchar_t* would cause unsigned short* be printed as a wide string, +// possibly causing invalid memory accesses. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Overloads for wide C strings +GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os); +inline void PrintTo(wchar_t* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +#endif + +// Overload for C arrays. Multi-dimensional arrays are printed +// properly. + +// Prints the given number of elements in an array, without printing +// the curly braces. 
+template +void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { + UniversalPrint(a[0], os); + for (size_t i = 1; i != count; i++) { + *os << ", "; + UniversalPrint(a[i], os); + } +} + +// Overloads for ::string and ::std::string. +#if GTEST_HAS_GLOBAL_STRING +GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os); +inline void PrintTo(const ::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); +inline void PrintTo(const ::std::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} + +// Overloads for ::wstring and ::std::wstring. +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_TR1_TUPLE +// Overload for ::std::tr1::tuple. Needed for printing function arguments, +// which are packed as tuples. + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os); + +// Overloaded PrintTo() for tuples of various arities. We support +// tuples of up-to 10 fields. The following implementation works +// regardless of whether tr1::tuple is implemented using the +// non-standard variadic template feature or not. 
+ +inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo( + const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} +#endif // GTEST_HAS_TR1_TUPLE + +// Overload for std::pair. +template +void PrintTo(const ::std::pair& value, ::std::ostream* os) { + *os << '('; + // We cannot use UniversalPrint(value.first, os) here, as T1 may be + // a reference type. The same for printing value.second. + UniversalPrinter::Print(value.first, os); + *os << ", "; + UniversalPrinter::Print(value.second, os); + *os << ')'; +} + +// Implements printing a non-reference type T by letting the compiler +// pick the right overload of PrintTo() for T. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. 
+#endif // _MSC_VER + + // Note: we deliberately don't call this PrintTo(), as that name + // conflicts with ::testing::internal::PrintTo in the body of the + // function. + static void Print(const T& value, ::std::ostream* os) { + // By default, ::testing::internal::PrintTo() is used for printing + // the value. + // + // Thanks to Koenig look-up, if T is a class and has its own + // PrintTo() function defined in its namespace, that function will + // be visible here. Since it is more specific than the generic ones + // in ::testing::internal, it will be picked by the compiler in the + // following statement - exactly what we want. + PrintTo(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// UniversalPrintArray(begin, len, os) prints an array of 'len' +// elements, starting at address 'begin'. +template +void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { + if (len == 0) { + *os << "{}"; + } else { + *os << "{ "; + const size_t kThreshold = 18; + const size_t kChunkSize = 8; + // If the array has more than kThreshold elements, we'll have to + // omit some details by printing only the first and the last + // kChunkSize elements. + // TODO(wan@google.com): let the user control the threshold using a flag. + if (len <= kThreshold) { + PrintRawArrayTo(begin, len, os); + } else { + PrintRawArrayTo(begin, kChunkSize, os); + *os << ", ..., "; + PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os); + } + *os << " }"; + } +} +// This overload prints a (const) char array compactly. +GTEST_API_ void UniversalPrintArray( + const char* begin, size_t len, ::std::ostream* os); + +// This overload prints a (const) wchar_t array compactly. +GTEST_API_ void UniversalPrintArray( + const wchar_t* begin, size_t len, ::std::ostream* os); + +// Implements printing an array type T[N]. 
+template +class UniversalPrinter { + public: + // Prints the given array, omitting some elements when there are too + // many. + static void Print(const T (&a)[N], ::std::ostream* os) { + UniversalPrintArray(a, N, os); + } +}; + +// Implements printing a reference type T&. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + static void Print(const T& value, ::std::ostream* os) { + // Prints the address of the value. We use reinterpret_cast here + // as static_cast doesn't compile when T is a function type. + *os << "@" << reinterpret_cast(&value) << " "; + + // Then prints the value itself. + UniversalPrint(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// Prints a value tersely: for a reference type, the referenced value +// (but not the address) is printed; for a (const) char pointer, the +// NUL-terminated string (but not the pointer) is printed. 
+ +template +class UniversalTersePrinter { + public: + static void Print(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); + } +}; +template +class UniversalTersePrinter { + public: + static void Print(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); + } +}; +template +class UniversalTersePrinter { + public: + static void Print(const T (&value)[N], ::std::ostream* os) { + UniversalPrinter::Print(value, os); + } +}; +template <> +class UniversalTersePrinter { + public: + static void Print(const char* str, ::std::ostream* os) { + if (str == NULL) { + *os << "NULL"; + } else { + UniversalPrint(string(str), os); + } + } +}; +template <> +class UniversalTersePrinter { + public: + static void Print(char* str, ::std::ostream* os) { + UniversalTersePrinter::Print(str, os); + } +}; + +#if GTEST_HAS_STD_WSTRING +template <> +class UniversalTersePrinter { + public: + static void Print(const wchar_t* str, ::std::ostream* os) { + if (str == NULL) { + *os << "NULL"; + } else { + UniversalPrint(::std::wstring(str), os); + } + } +}; +#endif + +template <> +class UniversalTersePrinter { + public: + static void Print(wchar_t* str, ::std::ostream* os) { + UniversalTersePrinter::Print(str, os); + } +}; + +template +void UniversalTersePrint(const T& value, ::std::ostream* os) { + UniversalTersePrinter::Print(value, os); +} + +// Prints a value using the type inferred by the compiler. The +// difference between this and UniversalTersePrint() is that for a +// (const) char pointer, this prints both the pointer and the +// NUL-terminated string. +template +void UniversalPrint(const T& value, ::std::ostream* os) { + // A workarond for the bug in VC++ 7.1 that prevents us from instantiating + // UniversalPrinter with T directly. 
+ typedef T T1; + UniversalPrinter::Print(value, os); +} + +#if GTEST_HAS_TR1_TUPLE +typedef ::std::vector Strings; + +// This helper template allows PrintTo() for tuples and +// UniversalTersePrintTupleFieldsToStrings() to be defined by +// induction on the number of tuple fields. The idea is that +// TuplePrefixPrinter::PrintPrefixTo(t, os) prints the first N +// fields in tuple t, and can be defined in terms of +// TuplePrefixPrinter. + +// The inductive case. +template +struct TuplePrefixPrinter { + // Prints the first N fields of a tuple. + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + TuplePrefixPrinter::PrintPrefixTo(t, os); + *os << ", "; + UniversalPrinter::type> + ::Print(::std::tr1::get(t), os); + } + + // Tersely prints the first N fields of a tuple to a string vector, + // one element for each field. + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + TuplePrefixPrinter::TersePrintPrefixToStrings(t, strings); + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Base cases. +template <> +struct TuplePrefixPrinter<0> { + template + static void PrintPrefixTo(const Tuple&, ::std::ostream*) {} + + template + static void TersePrintPrefixToStrings(const Tuple&, Strings*) {} +}; +// We have to specialize the entire TuplePrefixPrinter<> class +// template here, even though the definition of +// TersePrintPrefixToStrings() is the same as the generic version, as +// Embarcadero (formerly CodeGear, formerly Borland) C++ doesn't +// support specializing a method template of a class template. 
+template <> +struct TuplePrefixPrinter<1> { + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + UniversalPrinter::type>:: + Print(::std::tr1::get<0>(t), os); + } + + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get<0>(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os) { + *os << "("; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + PrintPrefixTo(t, os); + *os << ")"; +} + +// Prints the fields of a tuple tersely to a string vector, one +// element for each field. See the comment before +// UniversalTersePrint() for how we define "tersely". +template +Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { + Strings result; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + TersePrintPrefixToStrings(value, &result); + return result; +} +#endif // GTEST_HAS_TR1_TUPLE + +} // namespace internal + +template +::std::string PrintToString(const T& value) { + ::std::stringstream ss; + internal::UniversalTersePrinter::Print(value, &ss); + return ss.str(); +} + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { +namespace internal { + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Outputs a message explaining invalid registration of different +// fixture class for the same test case. This may happen when +// TEST_P macro is used to define two tests with the same name +// but in different namespaces. +GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line); + +template class ParamGeneratorInterface; +template class ParamGenerator; + +// Interface for iterating over elements provided by an implementation +// of ParamGeneratorInterface. 
+template +class ParamIteratorInterface { + public: + virtual ~ParamIteratorInterface() {} + // A pointer to the base generator instance. + // Used only for the purposes of iterator comparison + // to make sure that two iterators belong to the same generator. + virtual const ParamGeneratorInterface* BaseGenerator() const = 0; + // Advances iterator to point to the next element + // provided by the generator. The caller is responsible + // for not calling Advance() on an iterator equal to + // BaseGenerator()->End(). + virtual void Advance() = 0; + // Clones the iterator object. Used for implementing copy semantics + // of ParamIterator. + virtual ParamIteratorInterface* Clone() const = 0; + // Dereferences the current iterator and provides (read-only) access + // to the pointed value. It is the caller's responsibility not to call + // Current() on an iterator equal to BaseGenerator()->End(). + // Used for implementing ParamGenerator::operator*(). + virtual const T* Current() const = 0; + // Determines whether the given iterator and other point to the same + // element in the sequence generated by the generator. + // Used for implementing ParamGenerator::operator==(). + virtual bool Equals(const ParamIteratorInterface& other) const = 0; +}; + +// Class iterating over elements provided by an implementation of +// ParamGeneratorInterface. It wraps ParamIteratorInterface +// and implements the const forward iterator concept. +template +class ParamIterator { + public: + typedef T value_type; + typedef const T& reference; + typedef ptrdiff_t difference_type; + + // ParamIterator assumes ownership of the impl_ pointer. 
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {} + ParamIterator& operator=(const ParamIterator& other) { + if (this != &other) + impl_.reset(other.impl_->Clone()); + return *this; + } + + const T& operator*() const { return *impl_->Current(); } + const T* operator->() const { return impl_->Current(); } + // Prefix version of operator++. + ParamIterator& operator++() { + impl_->Advance(); + return *this; + } + // Postfix version of operator++. + ParamIterator operator++(int /*unused*/) { + ParamIteratorInterface* clone = impl_->Clone(); + impl_->Advance(); + return ParamIterator(clone); + } + bool operator==(const ParamIterator& other) const { + return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_); + } + bool operator!=(const ParamIterator& other) const { + return !(*this == other); + } + + private: + friend class ParamGenerator; + explicit ParamIterator(ParamIteratorInterface* impl) : impl_(impl) {} + scoped_ptr > impl_; +}; + +// ParamGeneratorInterface is the binary interface to access generators +// defined in other translation units. +template +class ParamGeneratorInterface { + public: + typedef T ParamType; + + virtual ~ParamGeneratorInterface() {} + + // Generator interface definition + virtual ParamIteratorInterface* Begin() const = 0; + virtual ParamIteratorInterface* End() const = 0; +}; + +// Wraps ParamGeneratorInterface and provides general generator syntax +// compatible with the STL Container concept. +// This class implements copy initialization semantics and the contained +// ParamGeneratorInterface instance is shared among all copies +// of the original object. This is possible because that instance is immutable. 
+template +class ParamGenerator { + public: + typedef ParamIterator iterator; + + explicit ParamGenerator(ParamGeneratorInterface* impl) : impl_(impl) {} + ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {} + + ParamGenerator& operator=(const ParamGenerator& other) { + impl_ = other.impl_; + return *this; + } + + iterator begin() const { return iterator(impl_->Begin()); } + iterator end() const { return iterator(impl_->End()); } + + private: + linked_ptr > impl_; +}; + +// Generates values from a range of two comparable values. Can be used to +// generate sequences of user-defined types that implement operator+() and +// operator<(). +// This class is used in the Range() function. +template +class RangeGenerator : public ParamGeneratorInterface { + public: + RangeGenerator(T begin, T end, IncrementT step) + : begin_(begin), end_(end), + step_(step), end_index_(CalculateEndIndex(begin, end, step)) {} + virtual ~RangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, begin_, 0, step_); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, end_, end_index_, step_); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, T value, int index, + IncrementT step) + : base_(base), value_(value), index_(index), step_(step) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + value_ = value_ + step_; + index_++; + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const T* Current() const { return &value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. 
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const int other_index = + CheckedDowncastToActualType(&other)->index_; + return index_ == other_index; + } + + private: + Iterator(const Iterator& other) + : ParamIteratorInterface(), + base_(other.base_), value_(other.value_), index_(other.index_), + step_(other.step_) {} + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + T value_; + int index_; + const IncrementT step_; + }; // class RangeGenerator::Iterator + + static int CalculateEndIndex(const T& begin, + const T& end, + const IncrementT& step) { + int end_index = 0; + for (T i = begin; i < end; i = i + step) + end_index++; + return end_index; + } + + // No implementation - assignment is unsupported. + void operator=(const RangeGenerator& other); + + const T begin_; + const T end_; + const IncrementT step_; + // The index for the end() iterator. All the elements in the generated + // sequence are indexed (0-based) to aid iterator comparison. + const int end_index_; +}; // class RangeGenerator + + +// Generates values from a pair of STL-style iterators. Used in the +// ValuesIn() function. The elements are copied from the source range +// since the source can be located on the stack, and the generator +// is likely to persist beyond that stack frame. 
+template +class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface { + public: + template + ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end) + : container_(begin, end) {} + virtual ~ValuesInIteratorRangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, container_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, container_.end()); + } + + private: + typedef typename ::std::vector ContainerType; + + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + typename ContainerType::const_iterator iterator) + : base_(base), iterator_(iterator) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + ++iterator_; + value_.reset(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + // We need to use cached value referenced by iterator_ because *iterator_ + // can return a temporary object (and of type other then T), so just + // having "return &*iterator_;" doesn't work. + // value_ is updated here and not in Advance() because Advance() + // can advance iterator_ beyond the end of the range, and we cannot + // detect that fact. The client code, on the other hand, is + // responsible for not calling Current() on an out-of-range iterator. + virtual const T* Current() const { + if (value_.get() == NULL) + value_.reset(new T(*iterator_)); + return value_.get(); + } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + return iterator_ == + CheckedDowncastToActualType(&other)->iterator_; + } + + private: + Iterator(const Iterator& other) + // The explicit constructor call suppresses a false warning + // emitted by gcc when supplied with the -Wextra option. + : ParamIteratorInterface(), + base_(other.base_), + iterator_(other.iterator_) {} + + const ParamGeneratorInterface* const base_; + typename ContainerType::const_iterator iterator_; + // A cached value of *iterator_. We keep it here to allow access by + // pointer in the wrapping iterator's operator->(). + // value_ needs to be mutable to be accessed in Current(). + // Use of scoped_ptr helps manage cached value's lifetime, + // which is bound by the lifespan of the iterator itself. + mutable scoped_ptr value_; + }; // class ValuesInIteratorRangeGenerator::Iterator + + // No implementation - assignment is unsupported. + void operator=(const ValuesInIteratorRangeGenerator& other); + + const ContainerType container_; +}; // class ValuesInIteratorRangeGenerator + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Stores a parameter value and later creates tests parameterized with that +// value. +template +class ParameterizedTestFactory : public TestFactoryBase { + public: + typedef typename TestClass::ParamType ParamType; + explicit ParameterizedTestFactory(ParamType parameter) : + parameter_(parameter) {} + virtual Test* CreateTest() { + TestClass::SetParam(¶meter_); + return new TestClass(); + } + + private: + const ParamType parameter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactoryBase is a base class for meta-factories that create +// test factories for passing into MakeAndRegisterTestInfo function. 
+template +class TestMetaFactoryBase { + public: + virtual ~TestMetaFactoryBase() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0; +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// TestMetaFactory creates test factories for passing into +// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives +// ownership of test factory pointer, same factory object cannot be passed +// into that method twice. But ParameterizedTestCaseInfo is going to call +// it for each Test/Parameter value combination. Thus it needs meta factory +// creator class. +template +class TestMetaFactory + : public TestMetaFactoryBase { + public: + typedef typename TestCase::ParamType ParamType; + + TestMetaFactory() {} + + virtual TestFactoryBase* CreateTestFactory(ParamType parameter) { + return new ParameterizedTestFactory(parameter); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseInfoBase is a generic interface +// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase +// accumulates test information provided by TEST_P macro invocations +// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations +// and uses that information to register all resulting test instances +// in RegisterTests method. The ParameterizeTestCaseRegistry class holds +// a collection of pointers to the ParameterizedTestCaseInfo objects +// and calls RegisterTests() on each of them when asked. +class ParameterizedTestCaseInfoBase { + public: + virtual ~ParameterizedTestCaseInfoBase() {} + + // Base part of test case name for display purposes. + virtual const string& GetTestCaseName() const = 0; + // Test case id to verify identity. + virtual TypeId GetTestCaseTypeId() const = 0; + // UnitTest class invokes this method to register tests in this + // test case right before running them in RUN_ALL_TESTS macro. 
+ // This method should not be called more then once on any single + // instance of a ParameterizedTestCaseInfoBase derived class. + virtual void RegisterTests() = 0; + + protected: + ParameterizedTestCaseInfoBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase); +}; + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P +// macro invocations for a particular test case and generators +// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that +// test case. It registers tests with all values generated by all +// generators when asked. +template +class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase { + public: + // ParamType and GeneratorCreationFunc are private types but are required + // for declarations of public methods AddTestPattern() and + // AddTestCaseInstantiation(). + typedef typename TestCase::ParamType ParamType; + // A function that returns an instance of appropriate generator type. + typedef ParamGenerator(GeneratorCreationFunc)(); + + explicit ParameterizedTestCaseInfo(const char* name) + : test_case_name_(name) {} + + // Test case base name for display purposes. + virtual const string& GetTestCaseName() const { return test_case_name_; } + // Test case id to verify identity. + virtual TypeId GetTestCaseTypeId() const { return GetTypeId(); } + // TEST_P macro uses AddTestPattern() to record information + // about a single test in a LocalTestInfo structure. + // test_case_name is the base name of the test case (without invocation + // prefix). test_base_name is the name of an individual test without + // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is + // test case base name and DoBar is test base name. 
+ void AddTestPattern(const char* test_case_name, + const char* test_base_name, + TestMetaFactoryBase* meta_factory) { + tests_.push_back(linked_ptr(new TestInfo(test_case_name, + test_base_name, + meta_factory))); + } + // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information + // about a generator. + int AddTestCaseInstantiation(const string& instantiation_name, + GeneratorCreationFunc* func, + const char* /* file */, + int /* line */) { + instantiations_.push_back(::std::make_pair(instantiation_name, func)); + return 0; // Return value used only to run this method in namespace scope. + } + // UnitTest class invokes this method to register tests in this test case + // test cases right before running tests in RUN_ALL_TESTS macro. + // This method should not be called more then once on any single + // instance of a ParameterizedTestCaseInfoBase derived class. + // UnitTest has a guard to prevent from calling this method more then once. + virtual void RegisterTests() { + for (typename TestInfoContainer::iterator test_it = tests_.begin(); + test_it != tests_.end(); ++test_it) { + linked_ptr test_info = *test_it; + for (typename InstantiationContainer::iterator gen_it = + instantiations_.begin(); gen_it != instantiations_.end(); + ++gen_it) { + const string& instantiation_name = gen_it->first; + ParamGenerator generator((*gen_it->second)()); + + string test_case_name; + if ( !instantiation_name.empty() ) + test_case_name = instantiation_name + "/"; + test_case_name += test_info->test_case_base_name; + + int i = 0; + for (typename ParamGenerator::iterator param_it = + generator.begin(); + param_it != generator.end(); ++param_it, ++i) { + Message test_name_stream; + test_name_stream << test_info->test_base_name << "/" << i; + MakeAndRegisterTestInfo( + test_case_name.c_str(), + test_name_stream.GetString().c_str(), + NULL, // No type parameter. 
+ PrintToString(*param_it).c_str(), + GetTestCaseTypeId(), + TestCase::SetUpTestCase, + TestCase::TearDownTestCase, + test_info->test_meta_factory->CreateTestFactory(*param_it)); + } // for param_it + } // for gen_it + } // for test_it + } // RegisterTests + + private: + // LocalTestInfo structure keeps information about a single test registered + // with TEST_P macro. + struct TestInfo { + TestInfo(const char* a_test_case_base_name, + const char* a_test_base_name, + TestMetaFactoryBase* a_test_meta_factory) : + test_case_base_name(a_test_case_base_name), + test_base_name(a_test_base_name), + test_meta_factory(a_test_meta_factory) {} + + const string test_case_base_name; + const string test_base_name; + const scoped_ptr > test_meta_factory; + }; + typedef ::std::vector > TestInfoContainer; + // Keeps pairs of + // received from INSTANTIATE_TEST_CASE_P macros. + typedef ::std::vector > + InstantiationContainer; + + const string test_case_name_; + TestInfoContainer tests_; + InstantiationContainer instantiations_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo); +}; // class ParameterizedTestCaseInfo + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase +// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P +// macros use it to locate their corresponding ParameterizedTestCaseInfo +// descriptors. +class ParameterizedTestCaseRegistry { + public: + ParameterizedTestCaseRegistry() {} + ~ParameterizedTestCaseRegistry() { + for (TestCaseInfoContainer::iterator it = test_case_infos_.begin(); + it != test_case_infos_.end(); ++it) { + delete *it; + } + } + + // Looks up or creates and returns a structure containing information about + // tests and instantiations of a particular test case. 
+ template + ParameterizedTestCaseInfo* GetTestCasePatternHolder( + const char* test_case_name, + const char* file, + int line) { + ParameterizedTestCaseInfo* typed_test_info = NULL; + for (TestCaseInfoContainer::iterator it = test_case_infos_.begin(); + it != test_case_infos_.end(); ++it) { + if ((*it)->GetTestCaseName() == test_case_name) { + if ((*it)->GetTestCaseTypeId() != GetTypeId()) { + // Complain about incorrect usage of Google Test facilities + // and terminate the program since we cannot guaranty correct + // test case setup and tear-down in this case. + ReportInvalidTestCaseType(test_case_name, file, line); + posix::Abort(); + } else { + // At this point we are sure that the object we found is of the same + // type we are looking for, so we downcast it to that type + // without further checks. + typed_test_info = CheckedDowncastToActualType< + ParameterizedTestCaseInfo >(*it); + } + break; + } + } + if (typed_test_info == NULL) { + typed_test_info = new ParameterizedTestCaseInfo(test_case_name); + test_case_infos_.push_back(typed_test_info); + } + return typed_test_info; + } + void RegisterTests() { + for (TestCaseInfoContainer::iterator it = test_case_infos_.begin(); + it != test_case_infos_.end(); ++it) { + (*it)->RegisterTests(); + } + } + + private: + typedef ::std::vector TestCaseInfoContainer; + + TestCaseInfoContainer test_case_infos_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry); +}; + +} // namespace internal +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_ +// This file was GENERATED by command: +// pump.py gtest-param-util-generated.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// Type and function utilities for implementing parameterized tests. +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// Currently Google Test supports at most 50 arguments in Values, +// and at most 10 arguments in Combine. Please contact +// googletestframework@googlegroups.com if you need more. 
+// Please note that the number of arguments to Combine is limited +// by the maximum arity of the implementation of tr1::tuple which is +// currently set at 10. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. + +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Forward declarations of ValuesIn(), which is implemented in +// include/gtest/gtest-param-test.h. +template +internal::ParamGenerator< + typename ::testing::internal::IteratorTraits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end); + +template +internal::ParamGenerator ValuesIn(const T (&array)[N]); + +template +internal::ParamGenerator ValuesIn( + const Container& container); + +namespace internal { + +// Used in the Values() function to provide polymorphic capabilities. +template +class ValueArray1 { + public: + explicit ValueArray1(T1 v1) : v1_(v1) {} + + template + operator ParamGenerator() const { return ValuesIn(&v1_, &v1_ + 1); } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray1& other); + + const T1 v1_; +}; + +template +class ValueArray2 { + public: + ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray2& other); + + const T1 v1_; + const T2 v2_; +}; + +template +class ValueArray3 { + public: + ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray3& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; +}; + +template +class ValueArray4 { + public: + ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray4& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; +}; + +template +class ValueArray5 { + public: + ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray5& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; +}; + +template +class ValueArray6 { + public: + ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray6& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; +}; + +template +class ValueArray7 { + public: + ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray7& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; +}; + +template +class ValueArray8 { + public: + ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray8& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; +}; + +template +class ValueArray9 { + public: + ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray9& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; +}; + +template +class ValueArray10 { + public: + ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray10& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; +}; + +template +class ValueArray11 { + public: + ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray11& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; +}; + +template +class ValueArray12 { + public: + ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray12& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; +}; + +template +class ValueArray13 { + public: + ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray13& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; +}; + +template +class ValueArray14 { + public: + ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray14& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; +}; + +template +class ValueArray15 { + public: + ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray15& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; +}; + +template +class ValueArray16 { + public: + ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray16& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; +}; + +template +class ValueArray17 { + public: + ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray17& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; +}; + +template +class ValueArray18 { + public: + ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray18& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; +}; + +template +class ValueArray19 { + public: + ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray19& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; +}; + +template +class ValueArray20 { + public: + ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray20& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; +}; + +template +class ValueArray21 { + public: + ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray21& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; +}; + +template +class ValueArray22 { + public: + ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray22& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; +}; + +template +class ValueArray23 { + public: + ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray23& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; +}; + +template +class ValueArray24 { + public: + ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray24& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; +}; + +template +class ValueArray25 { + public: + ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray25& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; +}; + +template +class ValueArray26 { + public: + ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray26& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; +}; + +template +class ValueArray27 { + public: + ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray27& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; +}; + +template +class ValueArray28 { + public: + ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray28& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; +}; + +template +class ValueArray29 { + public: + ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray29& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; +}; + +template +class ValueArray30 { + public: + ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray30& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; +}; + +template +class ValueArray31 { + public: + ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is 
unsupported. + void operator=(const ValueArray31& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; +}; + +template +class ValueArray32 { + public: + ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_)}; + return 
ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray32& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; +}; + +template +class ValueArray33 { + public: + ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), 
static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray33& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; +}; + +template +class ValueArray34 { + public: + ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), 
static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray34& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; +}; + +template +class ValueArray35 { + public: + ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), 
+ static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray35& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; +}; + +template +class ValueArray36 { + public: + ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), 
v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray36& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; +}; + +template +class ValueArray37 { + public: + ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), 
v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray37& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; +}; + +template +class ValueArray38 { + public: + ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), 
static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray38& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; +}; + +template +class ValueArray39 { + public: + ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {} + + template + operator ParamGenerator() 
const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray39& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; +}; + +template +class ValueArray40 { + public: + ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, 
T38 v38, T39 v39, T40 v40) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray40& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; +}; + +template +class ValueArray41 { + public: + ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), 
static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray41& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; +}; + +template +class ValueArray42 { + public: + ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + 
v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray42& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; +}; + +template +class ValueArray43 { + public: + ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), + v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), 
static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray43& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; +}; + +template +class ValueArray44 { + public: + ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), 
v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), + v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), + v43_(v43), v44_(v44) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray44& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; +}; + +template +class ValueArray45 { + public: + ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), + v42_(v42), v43_(v43), v44_(v44), v45_(v45) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + 
static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray45& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; +}; + +template +class ValueArray46 { + public: + ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, 
T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray46& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; +}; + +template +class ValueArray47 { + public: + ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46), + v47_(v47) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), 
static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray47& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; +}; + +template +class ValueArray48 { + public: + ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 
v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), + v46_(v46), v47_(v47), v48_(v48) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray48& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; +}; + +template +class ValueArray49 { + public: + ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, + T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {} + + template + operator ParamGenerator() const { + const T array[] = 
{static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_), static_cast(v49_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray49& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; +}; + +template +class ValueArray50 { + public: + ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49, + T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {} + + template + operator 
ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_), static_cast(v49_), static_cast(v50_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray50& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; + const T50 v50_; +}; + +# if GTEST_HAS_COMBINE +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Generates values from the Cartesian product of values produced +// by the argument generators. 
+// +template +class CartesianProductGenerator2 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator2(const ParamGenerator& g1, + const ParamGenerator& g2) + : g1_(g1), g2_(g2) {} + virtual ~CartesianProductGenerator2() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current2_; + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + ParamType current_value_; + }; // class CartesianProductGenerator2::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator2& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; +}; // class CartesianProductGenerator2 + + +template +class CartesianProductGenerator3 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator3(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + virtual ~CartesianProductGenerator3() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current3_; + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + ParamType current_value_; + }; // class CartesianProductGenerator3::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator3& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; +}; // class CartesianProductGenerator3 + + +template +class CartesianProductGenerator4 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator4(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + virtual ~CartesianProductGenerator4() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename 
ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current4_; + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + ParamType current_value_; + }; // class CartesianProductGenerator4::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator4& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; +}; // class CartesianProductGenerator4 + + +template +class CartesianProductGenerator5 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator5(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + virtual ~CartesianProductGenerator5() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, 
+ const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current5_; + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + ParamType current_value_; + }; // class CartesianProductGenerator5::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator5& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; +}; // class CartesianProductGenerator5 + + +template +class CartesianProductGenerator6 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator6(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + virtual ~CartesianProductGenerator6() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + 
public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current6_; + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. 
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. 
+ // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + ParamType current_value_; + }; // class CartesianProductGenerator6::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator6& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; +}; // class CartesianProductGenerator6 + + +template +class CartesianProductGenerator7 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator7(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + virtual ~CartesianProductGenerator7() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + 
begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current7_; + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + ParamType current_value_; + }; // class CartesianProductGenerator7::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator7& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; +}; // class CartesianProductGenerator7 + + +template +class CartesianProductGenerator8 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator8(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + virtual ~CartesianProductGenerator8() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const 
typename ParamGenerator::iterator& current8) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current8_; + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + ParamType current_value_; + }; // class CartesianProductGenerator8::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator8& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; +}; // class CartesianProductGenerator8 + + +template +class CartesianProductGenerator9 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator9(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + virtual ~CartesianProductGenerator9() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& 
g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current9_; + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + ParamType current_value_; + }; // class CartesianProductGenerator9::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator9& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; +}; // class CartesianProductGenerator9 + + +template +class CartesianProductGenerator10 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator10(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9, + const ParamGenerator& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + virtual ~CartesianProductGenerator10() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end(), g10_, g10_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& 
current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9, + const ParamGenerator& g10, + const typename ParamGenerator::iterator& current10) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9), + begin10_(g10.begin()), end10_(g10.end()), current10_(current10) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current10_; + if (current10_ == end10_) { + current10_ = begin10_; + ++current9_; + } + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_ && + current10_ == typed_other->current10_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_), + begin10_(other.begin10_), + end10_(other.end10_), + current10_(other.current10_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_, *current10_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. 
+ return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_ || + current10_ == end10_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + const typename ParamGenerator::iterator begin10_; + const typename ParamGenerator::iterator end10_; + typename ParamGenerator::iterator current10_; + 
ParamType current_value_; + }; // class CartesianProductGenerator10::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator10& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; + const ParamGenerator g10_; +}; // class CartesianProductGenerator10 + + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Helper classes providing Combine() with polymorphic features. They allow +// casting CartesianProductGeneratorN to ParamGenerator if T is +// convertible to U. +// +template +class CartesianProductHolder2 { + public: +CartesianProductHolder2(const Generator1& g1, const Generator2& g2) + : g1_(g1), g2_(g2) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator2( + static_cast >(g1_), + static_cast >(g2_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder2& other); + + const Generator1 g1_; + const Generator2 g2_; +}; // class CartesianProductHolder2 + +template +class CartesianProductHolder3 { + public: +CartesianProductHolder3(const Generator1& g1, const Generator2& g2, + const Generator3& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator3( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder3& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; +}; // class CartesianProductHolder3 + +template +class CartesianProductHolder4 { + public: +CartesianProductHolder4(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator4( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder4& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; +}; // class CartesianProductHolder4 + +template +class CartesianProductHolder5 { + public: +CartesianProductHolder5(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator5( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder5& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; +}; // class CartesianProductHolder5 + +template +class CartesianProductHolder6 { + public: +CartesianProductHolder6(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator6( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder6& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; +}; // class CartesianProductHolder6 + +template +class CartesianProductHolder7 { + public: +CartesianProductHolder7(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator7( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder7& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; +}; // class CartesianProductHolder7 + +template +class CartesianProductHolder8 { + public: +CartesianProductHolder8(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator8( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder8& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; +}; // class CartesianProductHolder8 + +template +class CartesianProductHolder9 { + public: +CartesianProductHolder9(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator9( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast 
>(g9_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder9& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; +}; // class CartesianProductHolder9 + +template +class CartesianProductHolder10 { + public: +CartesianProductHolder10(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9, const Generator10& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator10( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_), + static_cast >(g10_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder10& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; + const Generator10 g10_; +}; // class CartesianProductHolder10 + +# endif // GTEST_HAS_COMBINE + +} // namespace internal +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Functions producing parameter generators. +// +// Google Test uses these generators to produce parameters for value- +// parameterized tests. 
When a parameterized test case is instantiated +// with a particular generator, Google Test creates and runs tests +// for each element in the sequence produced by the generator. +// +// In the following sample, tests from test case FooTest are instantiated +// each three times with parameter values 3, 5, and 8: +// +// class FooTest : public TestWithParam { ... }; +// +// TEST_P(FooTest, TestThis) { +// } +// TEST_P(FooTest, TestThat) { +// } +// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8)); +// + +// Range() returns generators providing sequences of values in a range. +// +// Synopsis: +// Range(start, end) +// - returns a generator producing a sequence of values {start, start+1, +// start+2, ..., }. +// Range(start, end, step) +// - returns a generator producing a sequence of values {start, start+step, +// start+step+step, ..., }. +// Notes: +// * The generated sequences never include end. For example, Range(1, 5) +// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2) +// returns a generator producing {1, 3, 5, 7}. +// * start and end must have the same type. That type may be any integral or +// floating-point type or a user defined type satisfying these conditions: +// * It must be assignable (have operator=() defined). +// * It must have operator+() (operator+(int-compatible type) for +// two-operand version). +// * It must have operator<() defined. +// Elements in the resulting sequences will also have that type. +// * Condition start < end must be satisfied in order for resulting sequences +// to contain any elements. +// +template +internal::ParamGenerator Range(T start, T end, IncrementT step) { + return internal::ParamGenerator( + new internal::RangeGenerator(start, end, step)); +} + +template +internal::ParamGenerator Range(T start, T end) { + return Range(start, end, 1); +} + +// ValuesIn() function allows generation of tests with parameters coming from +// a container. 
+// +// Synopsis: +// ValuesIn(const T (&array)[N]) +// - returns a generator producing sequences with elements from +// a C-style array. +// ValuesIn(const Container& container) +// - returns a generator producing sequences with elements from +// an STL-style container. +// ValuesIn(Iterator begin, Iterator end) +// - returns a generator producing sequences with elements from +// a range [begin, end) defined by a pair of STL-style iterators. These +// iterators can also be plain C pointers. +// +// Please note that ValuesIn copies the values from the containers +// passed in and keeps them to generate tests in RUN_ALL_TESTS(). +// +// Examples: +// +// This instantiates tests from test case StringTest +// each with C-string values of "foo", "bar", and "baz": +// +// const char* strings[] = {"foo", "bar", "baz"}; +// INSTANTIATE_TEST_CASE_P(StringSequence, SrtingTest, ValuesIn(strings)); +// +// This instantiates tests from test case StlStringTest +// each with STL strings with values "a" and "b": +// +// ::std::vector< ::std::string> GetParameterStrings() { +// ::std::vector< ::std::string> v; +// v.push_back("a"); +// v.push_back("b"); +// return v; +// } +// +// INSTANTIATE_TEST_CASE_P(CharSequence, +// StlStringTest, +// ValuesIn(GetParameterStrings())); +// +// +// This will also instantiate tests from CharTest +// each with parameter values 'a' and 'b': +// +// ::std::list GetParameterChars() { +// ::std::list list; +// list.push_back('a'); +// list.push_back('b'); +// return list; +// } +// ::std::list l = GetParameterChars(); +// INSTANTIATE_TEST_CASE_P(CharSequence2, +// CharTest, +// ValuesIn(l.begin(), l.end())); +// +template +internal::ParamGenerator< + typename ::testing::internal::IteratorTraits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end) { + typedef typename ::testing::internal::IteratorTraits + ::value_type ParamType; + return internal::ParamGenerator( + new internal::ValuesInIteratorRangeGenerator(begin, end)); +} + 
+template +internal::ParamGenerator ValuesIn(const T (&array)[N]) { + return ValuesIn(array, array + N); +} + +template +internal::ParamGenerator ValuesIn( + const Container& container) { + return ValuesIn(container.begin(), container.end()); +} + +// Values() allows generating tests from explicitly specified list of +// parameters. +// +// Synopsis: +// Values(T v1, T v2, ..., T vN) +// - returns a generator producing sequences with elements v1, v2, ..., vN. +// +// For example, this instantiates tests from test case BarTest each +// with values "one", "two", and "three": +// +// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three")); +// +// This instantiates tests from test case BazTest each with values 1, 2, 3.5. +// The exact type of values will depend on the type of parameter in BazTest. +// +// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5)); +// +// Currently, Values() supports from 1 to 50 parameters. +// +template +internal::ValueArray1 Values(T1 v1) { + return internal::ValueArray1(v1); +} + +template +internal::ValueArray2 Values(T1 v1, T2 v2) { + return internal::ValueArray2(v1, v2); +} + +template +internal::ValueArray3 Values(T1 v1, T2 v2, T3 v3) { + return internal::ValueArray3(v1, v2, v3); +} + +template +internal::ValueArray4 Values(T1 v1, T2 v2, T3 v3, T4 v4) { + return internal::ValueArray4(v1, v2, v3, v4); +} + +template +internal::ValueArray5 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5) { + return internal::ValueArray5(v1, v2, v3, v4, v5); +} + +template +internal::ValueArray6 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6) { + return internal::ValueArray6(v1, v2, v3, v4, v5, v6); +} + +template +internal::ValueArray7 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7) { + return internal::ValueArray7(v1, v2, v3, v4, v5, + v6, v7); +} + +template +internal::ValueArray8 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) { + return internal::ValueArray8(v1, v2, v3, v4, + v5, v6, 
v7, v8); +} + +template +internal::ValueArray9 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) { + return internal::ValueArray9(v1, v2, v3, + v4, v5, v6, v7, v8, v9); +} + +template +internal::ValueArray10 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) { + return internal::ValueArray10(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10); +} + +template +internal::ValueArray11 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) { + return internal::ValueArray11(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11); +} + +template +internal::ValueArray12 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) { + return internal::ValueArray12(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12); +} + +template +internal::ValueArray13 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) { + return internal::ValueArray13(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13); +} + +template +internal::ValueArray14 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) { + return internal::ValueArray14(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14); +} + +template +internal::ValueArray15 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) { + return internal::ValueArray15(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15); +} + +template +internal::ValueArray16 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16) { + return internal::ValueArray16(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16); +} + +template +internal::ValueArray17 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 
v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17) { + return internal::ValueArray17(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17); +} + +template +internal::ValueArray18 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18) { + return internal::ValueArray18(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18); +} + +template +internal::ValueArray19 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) { + return internal::ValueArray19(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19); +} + +template +internal::ValueArray20 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) { + return internal::ValueArray20(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20); +} + +template +internal::ValueArray21 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) { + return internal::ValueArray21(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21); +} + +template +internal::ValueArray22 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22) { + return internal::ValueArray22(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22); +} + +template +internal::ValueArray23 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, 
T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23) { + return internal::ValueArray23(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23); +} + +template +internal::ValueArray24 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24) { + return internal::ValueArray24(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24); +} + +template +internal::ValueArray25 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) { + return internal::ValueArray25(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25); +} + +template +internal::ValueArray26 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) { + return internal::ValueArray26(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26); +} + +template +internal::ValueArray27 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) { + return internal::ValueArray27(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27); +} + +template +internal::ValueArray28 Values(T1 v1, T2 v2, T3 
v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) { + return internal::ValueArray28(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28); +} + +template +internal::ValueArray29 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) { + return internal::ValueArray29(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29); +} + +template +internal::ValueArray30 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) { + return internal::ValueArray30(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30); +} + +template +internal::ValueArray31 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) { + return internal::ValueArray31(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31); +} + +template +internal::ValueArray32 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 
v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32) { + return internal::ValueArray32(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32); +} + +template +internal::ValueArray33 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33) { + return internal::ValueArray33(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33); +} + +template +internal::ValueArray34 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34) { + return internal::ValueArray34(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34); +} + +template +internal::ValueArray35 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) { + return internal::ValueArray35(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35); +} + 
+template +internal::ValueArray36 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) { + return internal::ValueArray36(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36); +} + +template +internal::ValueArray37 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37) { + return internal::ValueArray37(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37); +} + +template +internal::ValueArray38 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38) { + return internal::ValueArray38(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, + v33, v34, v35, v36, v37, v38); +} + +template +internal::ValueArray39 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, 
T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38, T39 v39) { + return internal::ValueArray39(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, + v32, v33, v34, v35, v36, v37, v38, v39); +} + +template +internal::ValueArray40 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, + T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, + T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) { + return internal::ValueArray40(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, + v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40); +} + +template +internal::ValueArray41 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) { + return internal::ValueArray41(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, + v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41); +} + +template +internal::ValueArray42 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 
v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) { + return internal::ValueArray42(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, + v42); +} + +template +internal::ValueArray43 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) { + return internal::ValueArray43(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, + v41, v42, v43); +} + +template +internal::ValueArray44 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) { + return internal::ValueArray44(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, + v40, v41, v42, v43, v44); +} + +template +internal::ValueArray45 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33, T34 
v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) { + return internal::ValueArray45(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, + v39, v40, v41, v42, v43, v44, v45); +} + +template +internal::ValueArray46 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) { + return internal::ValueArray46(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46); +} + +template +internal::ValueArray47 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) { + return internal::ValueArray47(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46, v47); +} + +template +internal::ValueArray48 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, 
T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, + T48 v48) { + return internal::ValueArray48(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, + v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48); +} + +template +internal::ValueArray49 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, + T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, + T47 v47, T48 v48, T49 v49) { + return internal::ValueArray49(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, + v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49); +} + +template +internal::ValueArray50 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, + T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, + T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) { + return internal::ValueArray50(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, 
v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, + v48, v49, v50); +} + +// Bool() allows generating tests with parameters in a set of (false, true). +// +// Synopsis: +// Bool() +// - returns a generator producing sequences with elements {false, true}. +// +// It is useful when testing code that depends on Boolean flags. Combinations +// of multiple flags can be tested when several Bool()'s are combined using +// Combine() function. +// +// In the following example all tests in the test case FlagDependentTest +// will be instantiated twice with parameters false and true. +// +// class FlagDependentTest : public testing::TestWithParam { +// virtual void SetUp() { +// external_flag = GetParam(); +// } +// } +// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool()); +// +inline internal::ParamGenerator Bool() { + return Values(false, true); +} + +# if GTEST_HAS_COMBINE +// Combine() allows the user to combine two or more sequences to produce +// values of a Cartesian product of those sequences' elements. +// +// Synopsis: +// Combine(gen1, gen2, ..., genN) +// - returns a generator producing sequences with elements coming from +// the Cartesian product of elements from the sequences generated by +// gen1, gen2, ..., genN. The sequence elements will have a type of +// tuple where T1, T2, ..., TN are the types +// of elements from sequences produces by gen1, gen2, ..., genN. +// +// Combine can have up to 10 arguments. This number is currently limited +// by the maximum number of elements in the tuple implementation used by Google +// Test. 
+// +// Example: +// +// This will instantiate tests in test case AnimalTest each one with +// the parameter values tuple("cat", BLACK), tuple("cat", WHITE), +// tuple("dog", BLACK), and tuple("dog", WHITE): +// +// enum Color { BLACK, GRAY, WHITE }; +// class AnimalTest +// : public testing::TestWithParam > {...}; +// +// TEST_P(AnimalTest, AnimalLooksNice) {...} +// +// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest, +// Combine(Values("cat", "dog"), +// Values(BLACK, WHITE))); +// +// This will instantiate tests in FlagDependentTest with all variations of two +// Boolean flags: +// +// class FlagDependentTest +// : public testing::TestWithParam > { +// virtual void SetUp() { +// // Assigns external_flag_1 and external_flag_2 values from the tuple. +// tie(external_flag_1, external_flag_2) = GetParam(); +// } +// }; +// +// TEST_P(FlagDependentTest, TestFeature1) { +// // Test your code using external_flag_1 and external_flag_2 here. +// } +// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest, +// Combine(Bool(), Bool())); +// +template +internal::CartesianProductHolder2 Combine( + const Generator1& g1, const Generator2& g2) { + return internal::CartesianProductHolder2( + g1, g2); +} + +template +internal::CartesianProductHolder3 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3) { + return internal::CartesianProductHolder3( + g1, g2, g3); +} + +template +internal::CartesianProductHolder4 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4) { + return internal::CartesianProductHolder4( + g1, g2, g3, g4); +} + +template +internal::CartesianProductHolder5 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5) { + return internal::CartesianProductHolder5( + g1, g2, g3, g4, g5); +} + +template +internal::CartesianProductHolder6 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const 
Generator4& g4, const Generator5& g5, const Generator6& g6) { + return internal::CartesianProductHolder6( + g1, g2, g3, g4, g5, g6); +} + +template +internal::CartesianProductHolder7 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7) { + return internal::CartesianProductHolder7( + g1, g2, g3, g4, g5, g6, g7); +} + +template +internal::CartesianProductHolder8 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8) { + return internal::CartesianProductHolder8( + g1, g2, g3, g4, g5, g6, g7, g8); +} + +template +internal::CartesianProductHolder9 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9) { + return internal::CartesianProductHolder9( + g1, g2, g3, g4, g5, g6, g7, g8, g9); +} + +template +internal::CartesianProductHolder10 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9, + const Generator10& g10) { + return internal::CartesianProductHolder10( + g1, g2, g3, g4, g5, g6, g7, g8, g9, g10); +} +# endif // GTEST_HAS_COMBINE + + + +# define TEST_P(test_case_name, test_name) \ + class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + : public test_case_name { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \ + virtual void TestBody(); \ + private: \ + static int AddToRegistry() { \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). 
\ + GetTestCasePatternHolder(\ + #test_case_name, __FILE__, __LINE__)->AddTestPattern(\ + #test_case_name, \ + #test_name, \ + new ::testing::internal::TestMetaFactory< \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \ + return 0; \ + } \ + static int gtest_registering_dummy_; \ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \ + }; \ + int GTEST_TEST_CLASS_NAME_(test_case_name, \ + test_name)::gtest_registering_dummy_ = \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \ + void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \ + ::testing::internal::ParamGenerator \ + gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \ + int gtest_##prefix##test_case_name##_dummy_ = \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \ + GetTestCasePatternHolder(\ + #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\ + #prefix, \ + >est_##prefix##test_case_name##_EvalGenerator_, \ + __FILE__, __LINE__) + +} // namespace testing + +#endif // GTEST_HAS_PARAM_TEST + +#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Google C++ Testing Framework definitions useful in production code. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_ + +// When you need to test the private or protected members of a class, +// use the FRIEND_TEST macro to declare your tests as friends of the +// class. For example: +// +// class MyClass { +// private: +// void MyMethod(); +// FRIEND_TEST(MyClassTest, MyMethod); +// }; +// +// class MyClassTest : public testing::Test { +// // ... +// }; +// +// TEST_F(MyClassTest, MyMethod) { +// // Can call MyClass::MyMethod() here. +// } + +#define FRIEND_TEST(test_case_name, test_name)\ +friend class test_case_name##_##test_name##_Test + +#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ + +#include +#include + +namespace testing { + +// A copyable object representing the result of a test part (i.e. an +// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()). +// +// Don't inherit from TestPartResult as its destructor is not virtual. +class GTEST_API_ TestPartResult { + public: + // The possible outcomes of a test part (i.e. an assertion or an + // explicit SUCCEED(), FAIL(), or ADD_FAILURE()). + enum Type { + kSuccess, // Succeeded. + kNonFatalFailure, // Failed but the test can continue. + kFatalFailure // Failed and the test should be terminated. + }; + + // C'tor. 
TestPartResult does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestPartResult object. + TestPartResult(Type a_type, + const char* a_file_name, + int a_line_number, + const char* a_message) + : type_(a_type), + file_name_(a_file_name == NULL ? "" : a_file_name), + line_number_(a_line_number), + summary_(ExtractSummary(a_message)), + message_(a_message) { + } + + // Gets the outcome of the test part. + Type type() const { return type_; } + + // Gets the name of the source file where the test part took place, or + // NULL if it's unknown. + const char* file_name() const { + return file_name_.empty() ? NULL : file_name_.c_str(); + } + + // Gets the line in the source file where the test part took place, + // or -1 if it's unknown. + int line_number() const { return line_number_; } + + // Gets the summary of the failure message. + const char* summary() const { return summary_.c_str(); } + + // Gets the message associated with the test part. + const char* message() const { return message_.c_str(); } + + // Returns true iff the test part passed. + bool passed() const { return type_ == kSuccess; } + + // Returns true iff the test part failed. + bool failed() const { return type_ != kSuccess; } + + // Returns true iff the test part non-fatally failed. + bool nonfatally_failed() const { return type_ == kNonFatalFailure; } + + // Returns true iff the test part fatally failed. + bool fatally_failed() const { return type_ == kFatalFailure; } + + private: + Type type_; + + // Gets the summary of the failure message by omitting the stack + // trace in it. + static std::string ExtractSummary(const char* message); + + // The name of the source file where the test part took place, or + // "" if the source file is unknown. + std::string file_name_; + // The line in the source file where the test part took place, or -1 + // if the line number is unknown. + int line_number_; + std::string summary_; // The test failure summary. 
+ std::string message_; // The test failure message. +}; + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result); + +// An array of TestPartResult objects. +// +// Don't inherit from TestPartResultArray as its destructor is not +// virtual. +class GTEST_API_ TestPartResultArray { + public: + TestPartResultArray() {} + + // Appends the given TestPartResult to the array. + void Append(const TestPartResult& result); + + // Returns the TestPartResult at the given index (0-based). + const TestPartResult& GetTestPartResult(int index) const; + + // Returns the number of TestPartResult objects in the array. + int size() const; + + private: + std::vector array_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); +}; + +// This interface knows how to report a test part result. +class TestPartResultReporterInterface { + public: + virtual ~TestPartResultReporterInterface() {} + + virtual void ReportTestPartResult(const TestPartResult& result) = 0; +}; + +namespace internal { + +// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a +// statement generates new fatal failures. To do so it registers itself as the +// current test part result reporter. Besides checking if fatal failures were +// reported, it only delegates the reporting to the former result reporter. +// The original result reporter is restored in the destructor. +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+class GTEST_API_ HasNewFatalFailureHelper + : public TestPartResultReporterInterface { + public: + HasNewFatalFailureHelper(); + virtual ~HasNewFatalFailureHelper(); + virtual void ReportTestPartResult(const TestPartResult& result); + bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: + bool has_new_fatal_failure_; + TestPartResultReporterInterface* original_reporter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); +}; + +} // namespace internal + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// This header implements typed tests and type-parameterized tests. + +// Typed (aka type-driven) tests repeat the same test for types in a +// list. You must know which types you want to test with when writing +// typed tests. Here's how you do it: + +#if 0 + +// First, define a fixture class template. It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + public: + ... + typedef std::list List; + static T shared_; + T value_; +}; + +// Next, associate a list of types with the test case, which will be +// repeated for each type in the list. The typedef is necessary for +// the macro to parse correctly. +typedef testing::Types MyTypes; +TYPED_TEST_CASE(FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// TYPED_TEST_CASE(FooTest, int); + +// Then, use TYPED_TEST() instead of TEST_F() to define as many typed +// tests for this test case as you want. +TYPED_TEST(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + // Since we are inside a derived class template, C++ requires use to + // visit the members of FooTest via 'this'. 
+ TypeParam n = this->value_; + + // To visit static members of the fixture, add the TestFixture:: + // prefix. + n += TestFixture::shared_; + + // To refer to typedefs in the fixture, add the "typename + // TestFixture::" prefix. + typename TestFixture::List values; + values.push_back(n); + ... +} + +TYPED_TEST(FooTest, HasPropertyA) { ... } + +#endif // 0 + +// Type-parameterized tests are abstract test patterns parameterized +// by a type. Compared with typed tests, type-parameterized tests +// allow you to define the test pattern without knowing what the type +// parameters are. The defined pattern can be instantiated with +// different types any number of times, in any number of translation +// units. +// +// If you are designing an interface or concept, you can define a +// suite of type-parameterized tests to verify properties that any +// valid implementation of the interface/concept should have. Then, +// each implementation can easily instantiate the test suite to verify +// that it conforms to the requirements, without having to write +// similar tests repeatedly. Here's an example: + +#if 0 + +// First, define a fixture class template. It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + ... +}; + +// Next, declare that you will define a type-parameterized test case +// (the _P suffix is for "parameterized" or "pattern", whichever you +// prefer): +TYPED_TEST_CASE_P(FooTest); + +// Then, use TYPED_TEST_P() to define as many type-parameterized tests +// for this type-parameterized test case as you want. +TYPED_TEST_P(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + TypeParam n = 0; + ... +} + +TYPED_TEST_P(FooTest, HasPropertyA) { ... } + +// Now the tricky part: you need to register all test patterns before +// you can instantiate them. 
The first argument of the macro is the +// test case name; the rest are the names of the tests in this test +// case. +REGISTER_TYPED_TEST_CASE_P(FooTest, + DoesBlah, HasPropertyA); + +// Finally, you are free to instantiate the pattern with the types you +// want. If you put the above code in a header file, you can #include +// it in multiple C++ source files and instantiate it multiple times. +// +// To distinguish different instances of the pattern, the first +// argument to the INSTANTIATE_* macro is a prefix that will be added +// to the actual test case name. Remember to pick unique prefixes for +// different instances. +typedef testing::Types MyTypes; +INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int); + +#endif // 0 + + +// Implements typed tests. + +#if GTEST_HAS_TYPED_TEST + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the typedef for the type parameters of the +// given test case. +# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_ + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. 
Types) +# define TYPED_TEST_CASE(CaseName, Types) \ + typedef ::testing::internal::TypeList< Types >::type \ + GTEST_TYPE_PARAMS_(CaseName) + +# define TYPED_TEST(CaseName, TestName) \ + template \ + class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \ + : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTest< \ + CaseName, \ + ::testing::internal::TemplateSel< \ + GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \ + GTEST_TYPE_PARAMS_(CaseName)>::Register(\ + "", #CaseName, #TestName, 0); \ + template \ + void GTEST_TEST_CLASS_NAME_(CaseName, TestName)::TestBody() + +#endif // GTEST_HAS_TYPED_TEST + +// Implements type-parameterized tests. + +#if GTEST_HAS_TYPED_TEST_P + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the namespace name that the type-parameterized tests for +// the given type-parameterized test case are defined in. The exact +// name of the namespace is subject to change without notice. +# define GTEST_CASE_NAMESPACE_(TestCaseName) \ + gtest_case_##TestCaseName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the variable used to remember the names of +// the defined tests in the given test case. +# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \ + gtest_typed_test_case_p_state_##TestCaseName##_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY. +// +// Expands to the name of the variable used to remember the names of +// the registered tests in the given test case. 
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \ + gtest_registered_test_names_##TestCaseName##_ + +// The variables defined in the type-parameterized test macros are +// static as typically these macros are used in a .h file that can be +// #included in multiple translation units linked together. +# define TYPED_TEST_CASE_P(CaseName) \ + static ::testing::internal::TypedTestCasePState \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName) + +# define TYPED_TEST_P(CaseName, TestName) \ + namespace GTEST_CASE_NAMESPACE_(CaseName) { \ + template \ + class TestName : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\ + __FILE__, __LINE__, #CaseName, #TestName); \ + } \ + template \ + void GTEST_CASE_NAMESPACE_(CaseName)::TestName::TestBody() + +# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \ + namespace GTEST_CASE_NAMESPACE_(CaseName) { \ + typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \ + } \ + static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \ + GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\ + __FILE__, __LINE__, #__VA_ARGS__) + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. Types) +# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \ + bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTestCase::type>::Register(\ + #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName)) + +#endif // GTEST_HAS_TYPED_TEST_P + +#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_ + +// Depending on the platform, different string classes are available. 
+// On Linux, in addition to ::std::string, Google also makes use of +// class ::string, which has the same interface as ::std::string, but +// has a different implementation. +// +// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that +// ::string is available AND is a distinct type to ::std::string, or +// define it to 0 to indicate otherwise. +// +// If the user's ::std::string and ::string are the same class due to +// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0. +// +// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined +// heuristically. + +namespace testing { + +// Declares the flags. + +// This flag temporary enables the disabled tests. +GTEST_DECLARE_bool_(also_run_disabled_tests); + +// This flag brings the debugger on an assertion failure. +GTEST_DECLARE_bool_(break_on_failure); + +// This flag controls whether Google Test catches all test-thrown exceptions +// and logs them as failures. +GTEST_DECLARE_bool_(catch_exceptions); + +// This flag enables using colors in terminal output. Available values are +// "yes" to enable colors, "no" (disable colors), or "auto" (the default) +// to let Google Test decide. +GTEST_DECLARE_string_(color); + +// This flag sets up the filter to select by name using a glob pattern +// the tests to run. If the filter is not given all tests are executed. +GTEST_DECLARE_string_(filter); + +// This flag causes the Google Test to list tests. None of the tests listed +// are actually run if the flag is provided. +GTEST_DECLARE_bool_(list_tests); + +// This flag controls whether Google Test emits a detailed XML report to a file +// in addition to its normal textual output. +GTEST_DECLARE_string_(output); + +// This flags control whether Google Test prints the elapsed time for each +// test. +GTEST_DECLARE_bool_(print_time); + +// This flag specifies the random number seed. +GTEST_DECLARE_int32_(random_seed); + +// This flag sets how many times the tests are repeated. 
The default value +// is 1. If the value is -1 the tests are repeating forever. +GTEST_DECLARE_int32_(repeat); + +// This flag controls whether Google Test includes Google Test internal +// stack frames in failure stack traces. +GTEST_DECLARE_bool_(show_internal_stack_frames); + +// When this flag is specified, tests' order is randomized on every iteration. +GTEST_DECLARE_bool_(shuffle); + +// This flag specifies the maximum number of stack frames to be +// printed in a failure message. +GTEST_DECLARE_int32_(stack_trace_depth); + +// When this flag is specified, a failed assertion will throw an +// exception if exceptions are enabled, or exit the program with a +// non-zero code otherwise. +GTEST_DECLARE_bool_(throw_on_failure); + +// When this flag is set with a "host:port" string, on supported +// platforms test results are streamed to the specified port on +// the specified host machine. +GTEST_DECLARE_string_(stream_result_to); + +// The upper limit for valid stack trace depths. +const int kMaxStackTraceDepth = 100; + +namespace internal { + +class AssertHelper; +class DefaultGlobalTestPartResultReporter; +class ExecDeathTest; +class NoExecDeathTest; +class FinalSuccessChecker; +class GTestFlagSaver; +class StreamingListenerTest; +class TestResultAccessor; +class TestEventListenersAccessor; +class TestEventRepeater; +class UnitTestRecordPropertyTestHelper; +class WindowsDeathTest; +class UnitTestImpl* GetUnitTestImpl(); +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const std::string& message); + +} // namespace internal + +// The friend relationship of some of these classes is cyclic. +// If we don't forward declare them the compiler might confuse the classes +// in friendship clauses with same named classes on the scope. +class Test; +class TestCase; +class TestInfo; +class UnitTest; + +// A class for indicating whether an assertion was successful. 
When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. 
+// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + // Used in the EXPECT_TRUE/FALSE(bool_expression). + explicit AssertionResult(bool success) : success_(success) {} + + // Returns true iff the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != NULL ? message_->c_str() : ""; + } + // TODO(vladl@google.com): Remove this after making sure no clients use it. + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. 
+ AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == NULL) + message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + internal::scoped_ptr< ::std::string> message_; + + GTEST_DISALLOW_ASSIGN_(AssertionResult); +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. +GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +// The abstract class that all tests inherit from. +// +// In Google Test, a unit test program contains one or many TestCases, and +// each TestCase contains one or many Tests. +// +// When you define a test using the TEST macro, you don't need to +// explicitly derive from Test - the TEST macro automatically does +// this for you. +// +// The only time you derive from Test is when defining a test fixture +// to be used a TEST_F. For example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { ... } +// virtual void TearDown() { ... } +// ... +// }; +// +// TEST_F(FooTest, Bar) { ... } +// TEST_F(FooTest, Baz) { ... } +// +// Test is not copyable. 
+class GTEST_API_ Test { + public: + friend class TestInfo; + + // Defines types for pointers to functions that set up and tear down + // a test case. + typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc; + typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc; + + // The d'tor is virtual as we intend to inherit from Test. + virtual ~Test(); + + // Sets up the stuff shared by all tests in this test case. + // + // Google Test will call Foo::SetUpTestCase() before running the first + // test in test case Foo. Hence a sub-class can define its own + // SetUpTestCase() method to shadow the one defined in the super + // class. + static void SetUpTestCase() {} + + // Tears down the stuff shared by all tests in this test case. + // + // Google Test will call Foo::TearDownTestCase() after running the last + // test in test case Foo. Hence a sub-class can define its own + // TearDownTestCase() method to shadow the one defined in the super + // class. + static void TearDownTestCase() {} + + // Returns true iff the current test has a fatal failure. + static bool HasFatalFailure(); + + // Returns true iff the current test has a non-fatal failure. + static bool HasNonfatalFailure(); + + // Returns true iff the current test has a (either fatal or + // non-fatal) failure. + static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); } + + // Logs a property for the current test, test case, or for the entire + // invocation of the test program when used outside of the context of a + // test case. Only the last value for a given key is remembered. These + // are public static so they can be called from utility functions that are + // not members of the test fixture. Calls to RecordProperty made during + // lifespan of the test (from the moment its constructor starts to the + // moment its destructor finishes) will be output in XML as attributes of + // the element. 
Properties recorded from fixture's + // SetUpTestCase or TearDownTestCase are logged as attributes of the + // corresponding element. Calls to RecordProperty made in the + // global context (before or after invocation of RUN_ALL_TESTS and from + // SetUp/TearDown method of Environment objects registered with Google + // Test) will be output as attributes of the element. + static void RecordProperty(const std::string& key, const std::string& value); + static void RecordProperty(const std::string& key, int value); + + protected: + // Creates a Test object. + Test(); + + // Sets up the test fixture. + virtual void SetUp(); + + // Tears down the test fixture. + virtual void TearDown(); + + private: + // Returns true iff the current test has the same fixture class as + // the first test in the current test case. + static bool HasSameFixtureClass(); + + // Runs the test after the test fixture has been set up. + // + // A sub-class must implement this to define the test logic. + // + // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM. + // Instead, use the TEST or TEST_F macro. + virtual void TestBody() = 0; + + // Sets up, executes, and tears down the test. + void Run(); + + // Deletes self. We deliberately pick an unusual name for this + // internal method to avoid clashing with names used in user TESTs. + void DeleteSelf_() { delete this; } + + // Uses a GTestFlagSaver to save and restore all Google Test flags. + const internal::GTestFlagSaver* const gtest_flag_saver_; + + // Often a user mis-spells SetUp() as Setup() and spends a long time + // wondering why it is never called by Google Test. The declaration of + // the following method is solely for catching such an error at + // compile time: + // + // - The return type is deliberately chosen to be not void, so it + // will be a conflict if a user declares void Setup() in his test + // fixture. 
+ // + // - This method is private, so it will be another compiler error + // if a user calls it from his test fixture. + // + // DO NOT OVERRIDE THIS FUNCTION. + // + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } + + // We disallow copying Tests. + GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); +}; + +typedef internal::TimeInMillis TimeInMillis; + +// A copyable object representing a user specified test property which can be +// output as a key/value string pair. +// +// Don't inherit from TestProperty as its destructor is not virtual. +class TestProperty { + public: + // C'tor. TestProperty does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestProperty object. + TestProperty(const std::string& a_key, const std::string& a_value) : + key_(a_key), value_(a_value) { + } + + // Gets the user supplied key. + const char* key() const { + return key_.c_str(); + } + + // Gets the user supplied value. + const char* value() const { + return value_.c_str(); + } + + // Sets a new value, overriding the one supplied in the constructor. + void SetValue(const std::string& new_value) { + value_ = new_value; + } + + private: + // The key supplied by the user. + std::string key_; + // The value supplied by the user. + std::string value_; +}; + +// The result of a single Test. This includes a list of +// TestPartResults, a list of TestProperties, a count of how many +// death tests there are in the Test, and how much time it took to run +// the Test. +// +// TestResult is not copyable. +class GTEST_API_ TestResult { + public: + // Creates an empty TestResult. + TestResult(); + + // D'tor. Do not inherit from TestResult. + ~TestResult(); + + // Gets the number of all test parts. 
This is the sum of the number + // of successful test parts and the number of failed test parts. + int total_part_count() const; + + // Returns the number of the test properties. + int test_property_count() const; + + // Returns true iff the test passed (i.e. no test part failed). + bool Passed() const { return !Failed(); } + + // Returns true iff the test failed. + bool Failed() const; + + // Returns true iff the test fatally failed. + bool HasFatalFailure() const; + + // Returns true iff the test has a non-fatal failure. + bool HasNonfatalFailure() const; + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test part result among all the results. i can range + // from 0 to test_property_count() - 1. If i is not in that range, aborts + // the program. + const TestPartResult& GetTestPartResult(int i) const; + + // Returns the i-th test property. i can range from 0 to + // test_property_count() - 1. If i is not in that range, aborts the + // program. + const TestProperty& GetTestProperty(int i) const; + + private: + friend class TestInfo; + friend class TestCase; + friend class UnitTest; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::ExecDeathTest; + friend class internal::TestResultAccessor; + friend class internal::UnitTestImpl; + friend class internal::WindowsDeathTest; + + // Gets the vector of TestPartResults. + const std::vector& test_part_results() const { + return test_part_results_; + } + + // Gets the vector of TestProperties. + const std::vector& test_properties() const { + return test_properties_; + } + + // Sets the elapsed time. + void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; } + + // Adds a test property to the list. The property is validated and may add + // a non-fatal failure if invalid (e.g., if it conflicts with reserved + // key names). 
If a property is already recorded for the same key, the + // value will be updated, rather than storing multiple values for the same + // key. xml_element specifies the element for which the property is being + // recorded and is used for validation. + void RecordProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a failure if the key is a reserved attribute of Google Test + // testcase tags. Returns true if the property is valid. + // TODO(russr): Validate attribute names are legal and human readable. + static bool ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a test part result to the list. + void AddTestPartResult(const TestPartResult& test_part_result); + + // Returns the death test count. + int death_test_count() const { return death_test_count_; } + + // Increments the death test count, returning the new count. + int increment_death_test_count() { return ++death_test_count_; } + + // Clears the test part results. + void ClearTestPartResults(); + + // Clears the object. + void Clear(); + + // Protects mutable state of the property vector and of owned + // properties, whose values may be updated. + internal::Mutex test_properites_mutex_; + + // The vector of TestPartResults + std::vector test_part_results_; + // The vector of TestProperties + std::vector test_properties_; + // Running count of death tests. + int death_test_count_; + // The elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestResult. 
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); +}; // class TestResult + +// A TestInfo object stores the following information about a test: +// +// Test case name +// Test name +// Whether the test should be run +// A function pointer that creates the test object when invoked +// Test result +// +// The constructor of TestInfo registers itself with the UnitTest +// singleton such that the RUN_ALL_TESTS() macro knows which tests to +// run. +class GTEST_API_ TestInfo { + public: + // Destructs a TestInfo object. This function is not virtual, so + // don't inherit from TestInfo. + ~TestInfo(); + + // Returns the test case name. + const char* test_case_name() const { return test_case_name_.c_str(); } + + // Returns the test name. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a typed + // or a type-parameterized test. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns the text representation of the value parameter, or NULL if this + // is not a value-parameterized test. + const char* value_param() const { + if (value_param_.get() != NULL) + return value_param_->c_str(); + return NULL; + } + + // Returns true if this test should run, that is if the test is not + // disabled (or it is disabled but the also_run_disabled_tests flag has + // been specified) and its full name matches the user-specified filter. + // + // Google Test allows the user to filter the tests by their full names. + // The full name of a test Bar in test case Foo is defined as + // "Foo.Bar". Only the tests that match the filter will run. + // + // A filter is a colon-separated list of glob (not regex) patterns, + // optionally followed by a '-' and a colon-separated list of + // negative patterns (tests to exclude). A test is run if it + // matches one of the positive patterns and does not match any of + // the negative patterns. 
+ // + // For example, *A*:Foo.* is a filter that matches any string that + // contains the character 'A' or starts with "Foo.". + bool should_run() const { return should_run_; } + + // Returns true iff this test will appear in the XML report. + bool is_reportable() const { + // For now, the XML report includes all tests matching the filter. + // In the future, we may trim tests that are excluded because of + // sharding. + return matches_filter_; + } + + // Returns the result of the test. + const TestResult* result() const { return &result_; } + + private: +#if GTEST_HAS_DEATH_TEST + friend class internal::DefaultDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + friend class Test; + friend class TestCase; + friend class internal::UnitTestImpl; + friend class internal::StreamingListenerTest; + friend TestInfo* internal::MakeAndRegisterTestInfo( + const char* test_case_name, + const char* name, + const char* type_param, + const char* value_param, + internal::TypeId fixture_class_id, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + internal::TestFactoryBase* factory); + + // Constructs a TestInfo object. The newly constructed instance assumes + // ownership of the factory object. + TestInfo(const std::string& test_case_name, + const std::string& name, + const char* a_type_param, // NULL if not a type-parameterized test + const char* a_value_param, // NULL if not a value-parameterized test + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory); + + // Increments the number of death tests encountered in this test so + // far. + int increment_death_test_count() { + return result_.increment_death_test_count(); + } + + // Creates the test object, runs it, records its result, and then + // deletes it. + void Run(); + + static void ClearTestResult(TestInfo* test_info) { + test_info->result_.Clear(); + } + + // These fields are immutable properties of the test. 
+ const std::string test_case_name_; // Test case name + const std::string name_; // Test name + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // Text representation of the value parameter, or NULL if this is not a + // value-parameterized test. + const internal::scoped_ptr value_param_; + const internal::TypeId fixture_class_id_; // ID of the test fixture class + bool should_run_; // True iff this test should run + bool is_disabled_; // True iff this test is disabled + bool matches_filter_; // True if this test matches the + // user-specified filter. + internal::TestFactoryBase* const factory_; // The factory that creates + // the test object + + // This field is mutable and needs to be reset before running the + // test for the second time. + TestResult result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); +}; + +// A test case, which consists of a vector of TestInfos. +// +// TestCase is not copyable. +class GTEST_API_ TestCase { + public: + // Creates a TestCase with the given name. + // + // TestCase does NOT have a default constructor. Always use this + // constructor to create a TestCase object. + // + // Arguments: + // + // name: name of the test case + // a_type_param: the name of the test's type parameter, or NULL if + // this is not a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase(const char* name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Destructor of TestCase. + virtual ~TestCase(); + + // Gets the name of the TestCase. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a + // type-parameterized test case. 
+ const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns true if any test in this test case should run. + bool should_run() const { return should_run_; } + + // Gets the number of successful tests in this test case. + int successful_test_count() const; + + // Gets the number of failed tests in this test case. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests in this test case. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Get the number of tests in this test case that should run. + int test_to_run_count() const; + + // Gets the number of all tests in this test case. + int total_test_count() const; + + // Returns true iff the test case passed. + bool Passed() const { return !Failed(); } + + // Returns true iff the test case failed. + bool Failed() const { return failed_test_count() > 0; } + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + const TestInfo* GetTestInfo(int i) const; + + // Returns the TestResult that holds test properties recorded during + // execution of SetUpTestCase and TearDownTestCase. + const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; } + + private: + friend class Test; + friend class internal::UnitTestImpl; + + // Gets the (mutable) vector of TestInfos in this TestCase. + std::vector& test_info_list() { return test_info_list_; } + + // Gets the (immutable) vector of TestInfos in this TestCase. 
+ const std::vector& test_info_list() const { + return test_info_list_; + } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + TestInfo* GetMutableTestInfo(int i); + + // Sets the should_run member. + void set_should_run(bool should) { should_run_ = should; } + + // Adds a TestInfo to this test case. Will delete the TestInfo upon + // destruction of the TestCase object. + void AddTestInfo(TestInfo * test_info); + + // Clears the results of all tests in this test case. + void ClearResult(); + + // Clears the results of all tests in the given test case. + static void ClearTestCaseResult(TestCase* test_case) { + test_case->ClearResult(); + } + + // Runs every test in this TestCase. + void Run(); + + // Runs SetUpTestCase() for this TestCase. This wrapper is needed + // for catching exceptions thrown from SetUpTestCase(). + void RunSetUpTestCase() { (*set_up_tc_)(); } + + // Runs TearDownTestCase() for this TestCase. This wrapper is + // needed for catching exceptions thrown from TearDownTestCase(). + void RunTearDownTestCase() { (*tear_down_tc_)(); } + + // Returns true iff test passed. + static bool TestPassed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Passed(); + } + + // Returns true iff test failed. + static bool TestFailed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Failed(); + } + + // Returns true iff the test is disabled and will be reported in the XML + // report. + static bool TestReportableDisabled(const TestInfo* test_info) { + return test_info->is_reportable() && test_info->is_disabled_; + } + + // Returns true iff test is disabled. + static bool TestDisabled(const TestInfo* test_info) { + return test_info->is_disabled_; + } + + // Returns true iff this test will appear in the XML report. 
+ static bool TestReportable(const TestInfo* test_info) { + return test_info->is_reportable(); + } + + // Returns true if the given test should run. + static bool ShouldRunTest(const TestInfo* test_info) { + return test_info->should_run(); + } + + // Shuffles the tests in this test case. + void ShuffleTests(internal::Random* random); + + // Restores the test order to before the first shuffle. + void UnshuffleTests(); + + // Name of the test case. + std::string name_; + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // The vector of TestInfos in their original order. It owns the + // elements in the vector. + std::vector test_info_list_; + // Provides a level of indirection for the test list to allow easy + // shuffling and restoring the test order. The i-th element in this + // vector is the index of the i-th test in the shuffled test list. + std::vector test_indices_; + // Pointer to the function that sets up the test case. + Test::SetUpTestCaseFunc set_up_tc_; + // Pointer to the function that tears down the test case. + Test::TearDownTestCaseFunc tear_down_tc_; + // True iff any test in this test case should run. + bool should_run_; + // Elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + // Holds test properties recorded during execution of SetUpTestCase and + // TearDownTestCase. + TestResult ad_hoc_test_result_; + + // We disallow copying TestCases. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase); +}; + +// An Environment object is capable of setting up and tearing down an +// environment. The user should subclass this to define his own +// environment(s). +// +// An Environment object does the set-up and tear-down in virtual +// methods SetUp() and TearDown() instead of the constructor and the +// destructor, as: +// +// 1. You cannot safely throw from a destructor. 
This is a problem +// as in some cases Google Test is used where exceptions are enabled, and +// we may want to implement ASSERT_* using exceptions where they are +// available. +// 2. You cannot use ASSERT_* directly in a constructor or +// destructor. +class Environment { + public: + // The d'tor is virtual as we need to subclass Environment. + virtual ~Environment() {} + + // Override this to define how to set up the environment. + virtual void SetUp() {} + + // Override this to define how to tear down the environment. + virtual void TearDown() {} + private: + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } +}; + +// The interface for tracing execution of tests. The methods are organized in +// the order the corresponding events are fired. +class TestEventListener { + public: + virtual ~TestEventListener() {} + + // Fired before any test activity starts. + virtual void OnTestProgramStart(const UnitTest& unit_test) = 0; + + // Fired before each iteration of tests starts. There may be more than + // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration + // index, starting from 0. + virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test case starts. + virtual void OnTestCaseStart(const TestCase& test_case) = 0; + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. 
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test case ends. + virtual void OnTestCaseEnd(const TestCase& test_case) = 0; + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. For +// comments about each method please see the definition of TestEventListener +// above. 
+// Provides empty implementations of every TestEventListener callback so
+// subclasses need to override only the events they care about.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+                                    int /*iteration*/) {}
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+  virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+  virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+  virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+  virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+  virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+                                  int /*iteration*/) {}
+  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+  TestEventListeners();
+  ~TestEventListeners();
+
+  // Appends an event listener to the end of the list. Google Test assumes
+  // the ownership of the listener (i.e. it will delete the listener when
+  // the test program finishes).
+  void Append(TestEventListener* listener);
+
+  // Removes the given event listener from the list and returns it.  It then
+  // becomes the caller's responsibility to delete the listener. Returns
+  // NULL if the listener is not found in the list.
+  TestEventListener* Release(TestEventListener* listener);
+
+  // Returns the standard listener responsible for the default console
+  // output.  Can be removed from the listeners list to shut down default
+  // console output.  Note that removing this object from the listener list
+  // with Release transfers its ownership to the caller and makes this
+  // function return NULL the next time.
+  TestEventListener* default_result_printer() const {
+    return default_result_printer_;
+  }
+
+  // Returns the standard listener responsible for the default XML output
+  // controlled by the --gtest_output=xml flag.  Can be removed from the
+  // listeners list by users who want to shut down the default XML output
+  // controlled by this flag and substitute it with custom one.  Note that
+  // removing this object from the listener list with Release transfers its
+  // ownership to the caller and makes this function return NULL the next
+  // time.
+  TestEventListener* default_xml_generator() const {
+    return default_xml_generator_;
+  }
+
+ private:
+  friend class TestCase;
+  friend class TestInfo;
+  friend class internal::DefaultGlobalTestPartResultReporter;
+  friend class internal::NoExecDeathTest;
+  friend class internal::TestEventListenersAccessor;
+  friend class internal::UnitTestImpl;
+
+  // Returns repeater that broadcasts the TestEventListener events to all
+  // subscribers.
+  TestEventListener* repeater();
+
+  // Sets the default_result_printer attribute to the provided listener.
+  // The listener is also added to the listener list and previous
+  // default_result_printer is removed from it and deleted. The listener can
+  // also be NULL in which case it will not be added to the list. Does
+  // nothing if the previous and the current listener objects are the same.
+  void SetDefaultResultPrinter(TestEventListener* listener);
+
+  // Sets the default_xml_generator attribute to the provided listener.  The
+  // listener is also added to the listener list and previous
+  // default_xml_generator is removed from it and deleted. The listener can
+  // also be NULL in which case it will not be added to the list. Does
+  // nothing if the previous and the current listener objects are the same.
+  void SetDefaultXmlGenerator(TestEventListener* listener);
+
+  // Controls whether events will be forwarded by the repeater to the
+  // listeners in the list.
+  bool EventForwardingEnabled() const;
+  void SuppressEventForwarding();
+
+  // The actual list of listeners.
+  internal::TestEventRepeater* repeater_;
+  // Listener responsible for the standard result output.
+  TestEventListener* default_result_printer_;
+  // Listener responsible for the creation of the XML output file.
+  TestEventListener* default_xml_generator_;
+
+  // We disallow copying TestEventListeners.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class.  The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called.  This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+  // Gets the singleton UnitTest object.  The first time this method
+  // is called, a UnitTest object is constructed and returned.
+  // Consecutive calls will return the same object.
+  static UnitTest* GetInstance();
+
+  // Runs all tests in this UnitTest object and prints the result.
+  // Returns 0 if successful, or 1 otherwise.
+  //
+  // This method can only be called from the main thread.
+  //
+  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+  int Run() GTEST_MUST_USE_RESULT_;
+
+  // Returns the working directory when the first TEST() or TEST_F()
+  // was executed.  The UnitTest object owns the string.
+  const char* original_working_dir() const;
+
+  // Returns the TestCase object for the test that's currently running,
+  // or NULL if no test is running.
+  const TestCase* current_test_case() const
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Returns the TestInfo object for the test that's currently running,
+  // or NULL if no test is running.
+  const TestInfo* current_test_info() const
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Returns the random seed used at the start of the current test run.
+  int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
+  // Returns the ParameterizedTestCaseRegistry object used to keep track of
+  // value-parameterized tests and instantiate and register them.
+  //
+  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+  internal::ParameterizedTestCaseRegistry& parameterized_test_registry()
+      GTEST_LOCK_EXCLUDED_(mutex_);
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Gets the number of successful test cases.
+  int successful_test_case_count() const;
+
+  // Gets the number of failed test cases.
+  int failed_test_case_count() const;
+
+  // Gets the number of all test cases.
+  int total_test_case_count() const;
+
+  // Gets the number of all test cases that contain at least one test
+  // that should run.
+  int test_case_to_run_count() const;
+
+  // Gets the number of successful tests.
+  int successful_test_count() const;
+
+  // Gets the number of failed tests.
+  int failed_test_count() const;
+
+  // Gets the number of disabled tests that will be reported in the XML report.
+  int reportable_disabled_test_count() const;
+
+  // Gets the number of disabled tests.
+  int disabled_test_count() const;
+
+  // Gets the number of tests to be printed in the XML report.
+  int reportable_test_count() const;
+
+  // Gets the number of all tests.
+  int total_test_count() const;
+
+  // Gets the number of tests that should run.
+  int test_to_run_count() const;
+
+  // Gets the time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp() const;
+
+  // Gets the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const;
+
+  // Returns true iff the unit test passed (i.e. all test cases passed).
+  bool Passed() const;
+
+  // Returns true iff the unit test failed (i.e. some test case failed
+  // or something outside of all tests failed).
+  bool Failed() const;
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  const TestCase* GetTestCase(int i) const;
+
+  // Returns the TestResult containing information on test failures and
+  // properties logged outside of individual test cases.
+  const TestResult& ad_hoc_test_result() const;
+
+  // Returns the list of event listeners that can be used to track events
+  // inside Google Test.
+  TestEventListeners& listeners();
+
+ private:
+  // Registers and returns a global test environment.  When a test
+  // program is run, all global test environments will be set-up in
+  // the order they were registered.  After all tests in the program
+  // have finished, all global test environments will be torn-down in
+  // the *reverse* order they were registered.
+  //
+  // The UnitTest object takes ownership of the given environment.
+  //
+  // This method can only be called from the main thread.
+  Environment* AddEnvironment(Environment* env);
+
+  // Adds a TestPartResult to the current TestResult object.  All
+  // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+  // eventually call this to report their results.  The user code
+  // should use the assertion macros instead of calling this directly.
+  void AddTestPartResult(TestPartResult::Type result_type,
+                         const char* file_name,
+                         int line_number,
+                         const std::string& message,
+                         const std::string& os_stack_trace)
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Adds a TestProperty to the current TestResult object when invoked from
+  // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+  // from SetUpTestCase or TearDownTestCase, or to the global property set
+  // when invoked elsewhere.  If the result already contains a property with
+  // the same key, the value will be updated.
+  void RecordProperty(const std::string& key, const std::string& value);
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  TestCase* GetMutableTestCase(int i);
+
+  // Accessors for the implementation object.
+  internal::UnitTestImpl* impl() { return impl_; }
+  const internal::UnitTestImpl* impl() const { return impl_; }
+
+  // These classes and functions are friends as they need to access private
+  // members of UnitTest.
+  friend class Test;
+  friend class internal::AssertHelper;
+  friend class internal::ScopedTrace;
+  friend class internal::StreamingListenerTest;
+  friend class internal::UnitTestRecordPropertyTestHelper;
+  friend Environment* AddGlobalTestEnvironment(Environment* env);
+  friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+  friend void internal::ReportFailureInUnknownLocation(
+      TestPartResult::Type result_type,
+      const std::string& message);
+
+  // Creates an empty UnitTest.
+  UnitTest();
+
+  // D'tor
+  virtual ~UnitTest();
+
+  // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+  // Google Test trace stack.
+  void PushGTestTrace(const internal::TraceInfo& trace)
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Pops a trace from the per-thread Google Test trace stack.
+  void PopGTestTrace()
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // Protects mutable state in *impl_.  This is mutable as some const
+  // methods need to lock it too.
+  mutable internal::Mutex mutex_;
+
+  // Opaque implementation object.  This field is never changed once
+  // the object is constructed.  We don't mark it as const here, as
+  // doing so will cause a warning in the constructor of UnitTest.
+  // Mutable state in *impl_ is protected by mutex_.
+  internal::UnitTestImpl* impl_;
+
+  // We disallow copying UnitTest.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+// +// You should call this before RUN_ALL_TESTS() is called, probably in +// main(). If you use gtest_main, you need to call this before main() +// starts for it to take effect. For example, you can define a global +// variable like this: +// +// testing::Environment* const foo_env = +// testing::AddGlobalTestEnvironment(new FooEnvironment); +// +// However, we strongly recommend you to write your own main() and +// call AddGlobalTestEnvironment() there, as relying on initialization +// of global variables makes the code harder to read and may cause +// problems when you register multiple environments from different +// translation units and the environments have dependencies among them +// (remember that the compiler doesn't guarantee the order in which +// global variables from different translation units are initialized). +inline Environment* AddGlobalTestEnvironment(Environment* env) { + return UnitTest::GetInstance()->AddEnvironment(env); +} + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +GTEST_API_ void InitGoogleTest(int* argc, char** argv); + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv); + +namespace internal { + +// FormatForComparison::Format(value) formats a +// value of type ToPrint that is an operand of a comparison assertion +// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in +// the comparison, and is used to help determine the best way to +// format the value. 
In particular, when the value is a C string +// (char pointer) and the other operand is an STL string object, we +// want to format the C string as a string, since we know it is +// compared by value with the string object. If the value is a char +// pointer but the other operand is not an STL string object, we don't +// know whether the pointer is supposed to point to a NUL-terminated +// string, and thus want to print it as a pointer to be safe. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +// The default case. +template +class FormatForComparison { + public: + static ::std::string Format(const ToPrint& value) { + return ::testing::PrintToString(value); + } +}; + +// Array. +template +class FormatForComparison { + public: + static ::std::string Format(const ToPrint* value) { + return FormatForComparison::Format(value); + } +}; + +// By default, print C string as pointers to be safe, as we don't know +// whether they actually point to a NUL-terminated string. + +#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \ + template \ + class FormatForComparison { \ + public: \ + static ::std::string Format(CharType* value) { \ + return ::testing::PrintToString(static_cast(value)); \ + } \ + } + +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char); +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char); +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t); +GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t); + +#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_ + +// If a C string is compared with an STL string object, we know it's meant +// to point to a NUL-terminated string, and thus can print it as a string. 
+ +#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \ + template <> \ + class FormatForComparison { \ + public: \ + static ::std::string Format(CharType* value) { \ + return ::testing::PrintToString(value); \ + } \ + } + +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string); +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string); + +#if GTEST_HAS_GLOBAL_STRING +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string); +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string); +#endif + +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring); +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring); +#endif + +#if GTEST_HAS_STD_WSTRING +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring); +GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring); +#endif + +#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_ + +// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc) +// operand to be used in a failure message. The type (but not value) +// of the other operand may affect the format. This allows us to +// print a char* as a raw pointer when it is compared against another +// char* or void*, and print it as a C string when it is compared +// against an std::string object, for example. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +template +std::string FormatForComparisonFailureMessage( + const T1& value, const T2& /* other_operand */) { + return FormatForComparison::Format(value); +} + +// The helper function for {ASSERT|EXPECT}_EQ. +template +AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual) { +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4389) // Temporarily disables warning on + // signed/unsigned mismatch. 
+#endif + + if (expected == actual) { + return AssertionSuccess(); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + + return EqFailure(expected_expression, + actual_expression, + FormatForComparisonFailureMessage(expected, actual), + FormatForComparisonFailureMessage(actual, expected), + false); +} + +// With this overloaded version, we allow anonymous enums to be used +// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums +// can be implicitly cast to BiggestInt. +GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + BiggestInt expected, + BiggestInt actual); + +// The helper class for {ASSERT|EXPECT}_EQ. The template argument +// lhs_is_null_literal is true iff the first argument to ASSERT_EQ() +// is a null pointer literal. The following default implementation is +// for lhs_is_null_literal being false. +template +class EqHelper { + public: + // This templatized version is for the general case. + template + static AssertionResult Compare(const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual) { + return CmpHelperEQ(expected_expression, actual_expression, expected, + actual); + } + + // With this overloaded version, we allow anonymous enums to be used + // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous + // enums can be implicitly cast to BiggestInt. + // + // Even though its body looks the same as the above version, we + // cannot merge the two, as it will make anonymous enums unhappy. + static AssertionResult Compare(const char* expected_expression, + const char* actual_expression, + BiggestInt expected, + BiggestInt actual) { + return CmpHelperEQ(expected_expression, actual_expression, expected, + actual); + } +}; + +// This specialization is used when the first argument to ASSERT_EQ() +// is a null pointer literal, like NULL, false, or 0. 
+template <> +class EqHelper { + public: + // We define two overloaded versions of Compare(). The first + // version will be picked when the second argument to ASSERT_EQ() is + // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or + // EXPECT_EQ(false, a_bool). + template + static AssertionResult Compare( + const char* expected_expression, + const char* actual_expression, + const T1& expected, + const T2& actual, + // The following line prevents this overload from being considered if T2 + // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr) + // expands to Compare("", "", NULL, my_ptr), which requires a conversion + // to match the Secret* in the other overload, which would otherwise make + // this template match better. + typename EnableIf::value>::type* = 0) { + return CmpHelperEQ(expected_expression, actual_expression, expected, + actual); + } + + // This version will be picked when the second argument to ASSERT_EQ() is a + // pointer, e.g. ASSERT_EQ(NULL, a_pointer). + template + static AssertionResult Compare( + const char* expected_expression, + const char* actual_expression, + // We used to have a second template parameter instead of Secret*. That + // template parameter would deduce to 'long', making this a better match + // than the first overload even without the first overload's EnableIf. + // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to + // non-pointer argument" (even a deduced integral argument), so the old + // implementation caused warnings in user code. + Secret* /* expected (NULL) */, + T* actual) { + // We already know that 'expected' is a null pointer. + return CmpHelperEQ(expected_expression, actual_expression, + static_cast(NULL), actual); + } +}; + +// A macro for implementing the helper functions needed to implement +// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste +// of similar code. 
+// +// For each templatized helper function, we also define an overloaded +// version for BiggestInt in order to reduce code bloat and allow +// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled +// with gcc 4. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +template \ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + const T1& val1, const T2& val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +}\ +GTEST_API_ AssertionResult CmpHelper##op_name(\ + const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2) + +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + +// Implements the helper function for {ASSERT|EXPECT}_NE +GTEST_IMPL_CMP_HELPER_(NE, !=); +// Implements the helper function for {ASSERT|EXPECT}_LE +GTEST_IMPL_CMP_HELPER_(LE, <=); +// Implements the helper function for {ASSERT|EXPECT}_LT +GTEST_IMPL_CMP_HELPER_(LT, <); +// Implements the helper function for {ASSERT|EXPECT}_GE +GTEST_IMPL_CMP_HELPER_(GE, >=); +// Implements the helper function for {ASSERT|EXPECT}_GT +GTEST_IMPL_CMP_HELPER_(GT, >); + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRNE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + + +// Helper function for *_STREQ on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual); + +// Helper function for *_STRNE on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2); + +} // namespace internal + +// IsSubstring() and IsNotSubstring() are intended to be used as the +// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by +// themselves. They check whether needle is a substring of haystack +// (NULL is considered a substring of itself only), and return an +// appropriate error message when they fail. +// +// The {needle,haystack}_expr arguments are the stringified +// expressions that generated the two real arguments. 
+GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack); +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack); +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack); + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack); +GTEST_API_ AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack); +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +// Helper template function for comparing floating-points. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+template +AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression, + const char* actual_expression, + RawType expected, + RawType actual) { + const FloatingPoint lhs(expected), rhs(actual); + + if (lhs.AlmostEquals(rhs)) { + return AssertionSuccess(); + } + + ::std::stringstream expected_ss; + expected_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << expected; + + ::std::stringstream actual_ss; + actual_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << actual; + + return EqFailure(expected_expression, + actual_expression, + StringStreamToString(&expected_ss), + StringStreamToString(&actual_ss), + false); +} + +// Helper function for implementing ASSERT_NEAR. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error); + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// A class that enables one to stream messages to assertion macros +class GTEST_API_ AssertHelper { + public: + // Constructor. + AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message); + ~AssertHelper(); + + // Message assignment is a semantic trick to enable assertion + // streaming; see the GTEST_MESSAGE_ macro below. + void operator=(const Message& message) const; + + private: + // We put our data in a struct so that the size of the AssertHelper class can + // be as small as possible. This is important because gcc is incapable of + // re-using stack space even for temporary variables, so every EXPECT_EQ + // reserves stack space for another AssertHelper. 
+ struct AssertHelperData { + AssertHelperData(TestPartResult::Type t, + const char* srcfile, + int line_num, + const char* msg) + : type(t), file(srcfile), line(line_num), message(msg) { } + + TestPartResult::Type const type; + const char* const file; + int const line; + std::string const message; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData); + }; + + AssertHelperData* const data_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper); +}; + +} // namespace internal + +#if GTEST_HAS_PARAM_TEST +// The pure interface class that all value-parameterized tests inherit from. +// A value-parameterized class must inherit from both ::testing::Test and +// ::testing::WithParamInterface. In most cases that just means inheriting +// from ::testing::TestWithParam, but more complicated test hierarchies +// may need to inherit from Test and WithParamInterface at different levels. +// +// This interface has support for accessing the test parameter value via +// the GetParam() method. +// +// Use it with one of the parameter generator defining functions, like Range(), +// Values(), ValuesIn(), Bool(), and Combine(). +// +// class FooTest : public ::testing::TestWithParam { +// protected: +// FooTest() { +// // Can use GetParam() here. +// } +// virtual ~FooTest() { +// // Can use GetParam() here. +// } +// virtual void SetUp() { +// // Can use GetParam() here. +// } +// virtual void TearDown { +// // Can use GetParam() here. +// } +// }; +// TEST_P(FooTest, DoesBar) { +// // Can use GetParam() method here. +// Foo foo; +// ASSERT_TRUE(foo.DoesBar(GetParam())); +// } +// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10)); + +template +class WithParamInterface { + public: + typedef T ParamType; + virtual ~WithParamInterface() {} + + // The current parameter value. Is also available in the test fixture's + // constructor. 
This member function is non-static, even though it only + // references static data, to reduce the opportunity for incorrect uses + // like writing 'WithParamInterface::GetParam()' for a test that + // uses a fixture whose parameter type is int. + const ParamType& GetParam() const { + GTEST_CHECK_(parameter_ != NULL) + << "GetParam() can only be called inside a value-parameterized test " + << "-- did you intend to write TEST_P instead of TEST_F?"; + return *parameter_; + } + + private: + // Sets parameter value. The caller is responsible for making sure the value + // remains alive and unchanged throughout the current test. + static void SetParam(const ParamType* parameter) { + parameter_ = parameter; + } + + // Static value used for accessing parameter during a test lifetime. + static const ParamType* parameter_; + + // TestClass must be a subclass of WithParamInterface and Test. + template friend class internal::ParameterizedTestFactory; +}; + +template +const T* WithParamInterface::parameter_ = NULL; + +// Most value-parameterized classes can ignore the existence of +// WithParamInterface, and can just inherit from ::testing::TestWithParam. + +template +class TestWithParam : public Test, public WithParamInterface { +}; + +#endif // GTEST_HAS_PARAM_TEST + +// Macros for indicating success/failure in test code. + +// ADD_FAILURE unconditionally adds a failure to the current test. +// SUCCEED generates a success - it doesn't automatically make the +// current test successful, as a test is only successful when it has +// no failure. +// +// EXPECT_* verifies that a certain condition is satisfied. If not, +// it behaves like ADD_FAILURE. In particular: +// +// EXPECT_TRUE verifies that a Boolean condition is true. +// EXPECT_FALSE verifies that a Boolean condition is false. +// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. 
People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. 
+ +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Includes the auto-generated header that implements a family of +// generic predicate assertion macros. +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command +// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Makes sure this header is not included before gtest.h. +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +# error Do not include gtest_pred_impl.h directly. Include gtest.h instead. +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... 
+// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most 5. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. + +// GTEST_ASSERT_ is the basic statement to which all of the assertions +// in this file reduce. Don't use this in your code. + +#define GTEST_ASSERT_(expression, on_failure) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar = (expression)) \ + ; \ + else \ + on_failure(gtest_ar.failure_message()) + + +// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. +template +AssertionResult AssertPred1Helper(const char* pred_text, + const char* e1, + Pred pred, + const T1& v1) { + if (pred(v1)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1. +// Don't use this in your code. +#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, v1), \ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. 
+#define GTEST_PRED1_(pred, v1, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \ + #v1, \ + pred, \ + v1), on_failure) + +// Unary predicate assertion macros. +#define EXPECT_PRED_FORMAT1(pred_format, v1) \ + GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED1(pred, v1) \ + GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT1(pred_format, v1) \ + GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED1(pred, v1) \ + GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use +// this in your code. +template +AssertionResult AssertPred2Helper(const char* pred_text, + const char* e1, + const char* e2, + Pred pred, + const T1& v1, + const T2& v2) { + if (pred(v1, v2)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2. +// Don't use this in your code. +#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use +// this in your code. +#define GTEST_PRED2_(pred, v1, v2, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \ + #v1, \ + #v2, \ + pred, \ + v1, \ + v2), on_failure) + +// Binary predicate assertion macros. 
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \ + GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED2(pred, v1, v2) \ + GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \ + GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED2(pred, v1, v2) \ + GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use +// this in your code. +template +AssertionResult AssertPred3Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3) { + if (pred(v1, v2, v3)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3. +// Don't use this in your code. +#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use +// this in your code. +#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + pred, \ + v1, \ + v2, \ + v3), on_failure) + +// Ternary predicate assertion macros. 
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \ + GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED3(pred, v1, v2, v3) \ + GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \ + GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED3(pred, v1, v2, v3) \ + GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use +// this in your code. +template +AssertionResult AssertPred4Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + const char* e4, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3, + const T4& v4) { + if (pred(v1, v2, v3, v4)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ", " + << e4 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3 + << "\n" << e4 << " evaluates to " << v4; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4. +// Don't use this in your code. +#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use +// this in your code. +#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4), on_failure) + +// 4-ary predicate assertion macros. 
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ + GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED4(pred, v1, v2, v3, v4) \ + GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \ + GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED4(pred, v1, v2, v3, v4) \ + GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_) + + + +// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use +// this in your code. +template +AssertionResult AssertPred5Helper(const char* pred_text, + const char* e1, + const char* e2, + const char* e3, + const char* e4, + const char* e5, + Pred pred, + const T1& v1, + const T2& v2, + const T3& v3, + const T4& v4, + const T5& v5) { + if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); + + return AssertionFailure() << pred_text << "(" + << e1 << ", " + << e2 << ", " + << e3 << ", " + << e4 << ", " + << e5 << ") evaluates to false, where" + << "\n" << e1 << " evaluates to " << v1 + << "\n" << e2 << " evaluates to " << v2 + << "\n" << e3 << " evaluates to " << v3 + << "\n" << e4 << " evaluates to " << v4 + << "\n" << e5 << " evaluates to " << v5; +} + +// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5. +// Don't use this in your code. +#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \ + on_failure) + +// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use +// this in your code. +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + #v5, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4, \ + v5), on_failure) + +// 5-ary predicate assertion macros. 
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) + + + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. 
{ASSERT|EXPECT}_EQ(expected, actual) is preferred to +// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. +// +// Examples: +// +// EXPECT_NE(5, Foo()); +// EXPECT_EQ(NULL, a_pointer); +// ASSERT_LT(i, array_size); +// ASSERT_GT(records.size(), 0) << "There is no record left."; + +#define EXPECT_EQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal:: \ + EqHelper::Compare, \ + expected, actual) +#define EXPECT_NE(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual) +#define EXPECT_LE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define EXPECT_LT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define EXPECT_GE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define EXPECT_GT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +#define GTEST_ASSERT_EQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal:: \ + EqHelper::Compare, \ + expected, actual) +#define GTEST_ASSERT_NE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2) +#define GTEST_ASSERT_LE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define GTEST_ASSERT_LT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define GTEST_ASSERT_GE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define GTEST_ASSERT_GT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of +// ASSERT_XY(), which clashes with 
some users' own code. + +#if !GTEST_DONT_DEFINE_ASSERT_EQ +# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_NE +# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LE +# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LT +# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GE +# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GT +# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2) +#endif + +// C-string Comparisons. All tests treat NULL and any non-NULL string +// as different. Two NULLs are equal. +// +// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2 +// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2 +// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case +// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case +// +// For wide or narrow string objects, you can use the +// {ASSERT|EXPECT}_??() macros. +// +// Don't depend on the order in which the arguments are evaluated, +// which is undefined. +// +// These macros evaluate their arguments exactly once. 
// C-string comparison assertions.  These compare by *content* (unlike
// the {ASSERT|EXPECT}_EQ family, which compares C-string pointers);
// the CmpHelperSTR* helpers implement the NULL-handling rules
// documented above.
#define EXPECT_STREQ(expected, actual) \
  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
#define EXPECT_STRNE(s1, s2) \
  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
#define EXPECT_STRCASEEQ(expected, actual) \
  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
#define EXPECT_STRCASENE(s1, s2)\
  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)

// Fatal variants of the four macros above.
#define ASSERT_STREQ(expected, actual) \
  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
#define ASSERT_STRNE(s1, s2) \
  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
#define ASSERT_STRCASEEQ(expected, actual) \
  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
#define ASSERT_STRCASENE(s1, s2)\
  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)

// Macros for comparing floating-point numbers.
//
//    * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
//         Tests that two float values are almost equal.
//    * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
//         Tests that two double values are almost equal.
//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
//         Tests that v1 and v2 are within the given distance to each
//         other.
//
// Google Test uses ULP-based comparison to automatically pick a default
// error bound that is appropriate for the operands.  See the
// FloatingPoint template class in gtest-internal.h if you are
// interested in the implementation details.
+ +#define EXPECT_FLOAT_EQ(expected, actual)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define EXPECT_DOUBLE_EQ(expected, actual)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define ASSERT_FLOAT_EQ(expected, actual)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define ASSERT_DOUBLE_EQ(expected, actual)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ, \ + expected, actual) + +#define EXPECT_NEAR(val1, val2, abs_error)\ + EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ + val1, val2, abs_error) + +#define ASSERT_NEAR(val1, val2, abs_error)\ + ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \ + val1, val2, abs_error) + +// These predicate format functions work on floating-point values, and +// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g. +// +// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0); + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2, + float val1, float val2); +GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2, + double val1, double val2); + + +#if GTEST_OS_WINDOWS + +// Macros that test for HRESULT failure and success, these are only useful +// on Windows, and rely on Windows SDK macros and APIs to compile. +// +// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr) +// +// When expr unexpectedly fails or succeeds, Google Test prints the +// expected result and the actual result with both a human-readable +// string representation of the error, if available, as well as the +// hex result code. 
# define EXPECT_HRESULT_SUCCEEDED(expr) \
    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))

# define ASSERT_HRESULT_SUCCEEDED(expr) \
    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))

# define EXPECT_HRESULT_FAILED(expr) \
    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))

# define ASSERT_HRESULT_FAILED(expr) \
    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))

#endif  // GTEST_OS_WINDOWS

// Macros that execute statement and check that it doesn't generate new fatal
// failures in the current thread.
//
//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
//
// Examples:
//
//   EXPECT_NO_FATAL_FAILURE(Process());
//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
//
#define ASSERT_NO_FATAL_FAILURE(statement) \
    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
#define EXPECT_NO_FATAL_FAILURE(statement) \
    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)

// Causes a trace (including the source file path, the current line
// number, and the given message) to be included in every test failure
// message generated by code in the current scope.  The effect is
// undone when the control leaves the current scope.
//
// The message argument can be anything streamable to std::ostream.
//
// In the implementation, we include the current line number as part
// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
// to appear in the same block - as long as they are on different
// lines.
#define SCOPED_TRACE(message) \
  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
    __FILE__, __LINE__, ::testing::Message() << (message))

// Compile-time assertion for type equality.
// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
// the same type.  The value it returns is not interesting.
+// +// Instead of making StaticAssertTypeEq a class template, we make it a +// function template that invokes a helper class template. This +// prevents a user from misusing StaticAssertTypeEq by +// defining objects of that type. +// +// CAVEAT: +// +// When used inside a method of a class template, +// StaticAssertTypeEq() is effective ONLY IF the method is +// instantiated. For example, given: +// +// template class Foo { +// public: +// void Bar() { testing::StaticAssertTypeEq(); } +// }; +// +// the code: +// +// void Test1() { Foo foo; } +// +// will NOT generate a compiler error, as Foo::Bar() is never +// actually instantiated. Instead, you need: +// +// void Test2() { Foo foo; foo.Bar(); } +// +// to cause a compiler error. +template +bool StaticAssertTypeEq() { + (void)internal::StaticAssertTypeEqHelper(); + return true; +} + +// Defines a test. +// +// The first parameter is the name of the test case, and the second +// parameter is the name of the test within the test case. +// +// The convention is to end the test case name with "Test". For +// example, a test case for the Foo class can be named FooTest. +// +// The user should put his test code between braces after using this +// macro. Example: +// +// TEST(FooTest, InitializesCorrectly) { +// Foo foo; +// EXPECT_TRUE(foo.StatusIsOK()); +// } + +// Note that we call GetTestTypeId() instead of GetTypeId< +// ::testing::Test>() here to get the type ID of testing::Test. This +// is to work around a suspected linker bug when using Google Test as +// a framework on Mac OS X. The bug causes GetTypeId< +// ::testing::Test>() to return different values depending on whether +// the call is from the Google Test framework itself or from user test +// code. GetTestTypeId() is guaranteed to always return the same +// value, as it always calls GetTypeId<>() from the Google Test +// framework. 
+#define GTEST_TEST(test_case_name, test_name)\ + GTEST_TEST_(test_case_name, test_name, \ + ::testing::Test, ::testing::internal::GetTestTypeId()) + +// Define this macro to 1 to omit the definition of TEST(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_TEST +# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name) +#endif + +// Defines a test that uses a test fixture. +// +// The first parameter is the name of the test fixture class, which +// also doubles as the test case name. The second parameter is the +// name of the test within the test case. +// +// A test fixture class must be declared earlier. The user should put +// his test code between braces after using this macro. Example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { b_.AddElement(3); } +// +// Foo a_; +// Foo b_; +// }; +// +// TEST_F(FooTest, InitializesCorrectly) { +// EXPECT_TRUE(a_.StatusIsOK()); +// } +// +// TEST_F(FooTest, ReturnsElementCountCorrectly) { +// EXPECT_EQ(0, a_.size()); +// EXPECT_EQ(1, b_.size()); +// } + +#define TEST_F(test_fixture, test_name)\ + GTEST_TEST_(test_fixture, test_name, test_fixture, \ + ::testing::internal::GetTypeId()) + +} // namespace testing + +// Use this function in main() to run all tests. It returns 0 if all +// tests are successful, or 1 otherwise. +// +// RUN_ALL_TESTS() should be invoked after the command line has been +// parsed by InitGoogleTest(). +// +// This function was formerly a macro; thus, it is in the global +// namespace and has an all-caps name. +int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_; + +inline int RUN_ALL_TESTS() { + return ::testing::UnitTest::GetInstance()->Run(); +} + +#endif // GTEST_INCLUDE_GTEST_GTEST_H_